source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__identity_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_int16
// op(A') function: GB_tran__identity_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// type of the entries of the input matrix A
#define GB_ATYPE \
int16_t
// type of the entries of the output matrix C
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__identity_uint8_int16: Cx [p] = (uint8_t) Ax [p] for 0 <= p < anz.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out via
// GB_DISABLE; the caller then falls back to the generic worker.
GrB_Info GB_unop__identity_uint8_int16
(
uint8_t *restrict Cx,       // output array, size anz
const int16_t *restrict Ax, // input array, size anz
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;   // Cx [p] = (uint8_t) Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__identity_uint8_int16: C = (uint8_t) A', transposing A while
// typecasting and applying the identity operator.  The work is done by the
// template GB_unaryop_transpose.c, driven by the GB_* macros defined above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__identity_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,          // per-slice counts used by the template
GBI_single_iterator Iter,     // iterator over A
const int64_t *restrict A_slice, // slice boundaries; see GB_unaryop_transpose.c
int naslice                   // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kmp_stats.h | #ifndef KMP_STATS_H
#define KMP_STATS_H
/** @file kmp_stats.h
* Functions for collecting statistics.
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "kmp_config.h"
#include "kmp_debug.h"
#if KMP_STATS_ENABLED
/* Statistics accumulator.
Accumulates number of samples and computes min, max, mean, standard deviation
on the fly.
Online variance calculation algorithm from
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
*/
#include "kmp_stats_timing.h"
#include <limits>
#include <math.h>
#include <new> // placement new
#include <stdint.h>
#include <string>
#include <vector>
/* Enable developer statistics here if you want them. They are more detailed
than is useful for application characterisation and are intended for the
runtime library developer. */
#define KMP_DEVELOPER_STATS 0
/* Enable/Disable histogram output */
#define KMP_STATS_HIST 0
/*!
* @ingroup STATS_GATHERING
* \brief flags to describe the statistic (timer or counter)
*
*/
// Bit flags describing one statistic; values are distinct powers of two and
// are combined with '|' in the statInfo tables below.
enum stats_flags_e {
noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic
onlyInMaster = 1 << 1, //!< statistic is valid only for primary thread
noUnits = 1 << 2, //!< statistic doesn't need units printed next to it
notInMaster = 1 << 3, //!< statistic is valid only for non-primary threads
logEvent = 1 << 4 //!< statistic can be logged on the event timeline when
//! KMP_STATS_EVENTS is on (valid only for timers)
};
/*!
* @ingroup STATS_GATHERING
* \brief the states which a thread can be in
*
*/
// Execution states a thread can be in; stored per thread in
// kmp_stats_list::state and switched with RAII via blockThreadState.
enum stats_state_e {
IDLE,
SERIAL_REGION,
FORK_JOIN_BARRIER,
PLAIN_BARRIER,
TASKWAIT,
TASKYIELD,
TASKGROUP,
IMPLICIT_TASK,
EXPLICIT_TASK,
TEAMS_REGION
};
/*!
* \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(COUNTER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A counter counts the occurrence of some event. Each thread
* accumulates its own count, at the end of execution the counts are aggregated
* treating each thread as a separate measurement. (Unless onlyInMaster is set,
* in which case there's only a single measurement). The min,mean,max are
* therefore the values for the threads. Adding the counter here and then
* putting a KMP_BLOCK_COUNTER(name) at the point you want to count is all you
* need to do. All of the tables and printing is generated from this macro.
* Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING
*/
// clang-format off
// X-macro list of every counter; expanded with a user macro(name, flags, arg)
// to generate the counter_e enum and the counterInfo table.
#define KMP_FOREACH_COUNTER(macro, arg) \
macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \
macro(OMP_NESTED_PARALLEL, 0, arg) \
macro(OMP_LOOP_STATIC, 0, arg) \
macro(OMP_LOOP_STATIC_STEAL, 0, arg) \
macro(OMP_LOOP_DYNAMIC, 0, arg) \
macro(OMP_DISTRIBUTE, 0, arg) \
macro(OMP_BARRIER, 0, arg) \
macro(OMP_CRITICAL, 0, arg) \
macro(OMP_SINGLE, 0, arg) \
macro(OMP_MASTER, 0, arg) \
macro(OMP_MASKED, 0, arg) \
macro(OMP_TEAMS, 0, arg) \
macro(OMP_set_lock, 0, arg) \
macro(OMP_test_lock, 0, arg) \
macro(REDUCE_wait, 0, arg) \
macro(REDUCE_nowait, 0, arg) \
macro(OMP_TASKYIELD, 0, arg) \
macro(OMP_TASKLOOP, 0, arg) \
macro(TASK_executed, 0, arg) \
macro(TASK_cancelled, 0, arg) \
macro(TASK_stolen, 0, arg)
// clang-format on
/*!
* \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A timer collects multiple samples of some count in each thread and
* then finally aggregates all of the samples from all of the threads. For most
* timers the printing code also provides an aggregation over the thread totals.
* These are printed as TOTAL_foo. The count is normally a time (in ticks),
* hence the name "timer". (But can be any value, so we use this for "number of
* arguments passed to fork" as well). For timers the threads are not
* significant, it's the individual observations that count, so the statistics
* are at that level. Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING2
*/
// clang-format off
// X-macro list of every timer; expanded with a user macro(name, flags, arg)
// to generate the timer_e enum and the timerInfo table.  Developer-only
// timers are appended via KMP_FOREACH_DEVELOPER_TIMER at the end.
#define KMP_FOREACH_TIMER(macro, arg) \
macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \
macro (OMP_parallel, stats_flags_e::logEvent, arg) \
macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \
macro (OMP_teams, stats_flags_e::logEvent, arg) \
macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \
macro (OMP_loop_static, 0, arg) \
macro (OMP_loop_static_scheduling, 0, arg) \
macro (OMP_loop_dynamic, 0, arg) \
macro (OMP_loop_dynamic_scheduling, 0, arg) \
macro (OMP_distribute, 0, arg) \
macro (OMP_distribute_scheduling, 0, arg) \
macro (OMP_critical, 0, arg) \
macro (OMP_critical_wait, 0, arg) \
macro (OMP_single, 0, arg) \
macro (OMP_master, 0, arg) \
macro (OMP_masked, 0, arg) \
macro (OMP_task_immediate, 0, arg) \
macro (OMP_task_taskwait, 0, arg) \
macro (OMP_task_taskyield, 0, arg) \
macro (OMP_task_taskgroup, 0, arg) \
macro (OMP_task_join_bar, 0, arg) \
macro (OMP_task_plain_bar, 0, arg) \
macro (OMP_taskloop_scheduling, 0, arg) \
macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_idle, stats_flags_e::logEvent, arg) \
macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_serial, stats_flags_e::logEvent, arg) \
macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_loop_static_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_static_total_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_dynamic_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_dynamic_total_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_distribute_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
// clang-format on
// OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either
// initializing OpenMP or being created by a primary
// thread) until the thread is destroyed
// OMP_parallel -- Time thread spends executing work directly
// within a #pragma omp parallel
// OMP_parallel_overhead -- Time thread spends setting up a parallel region
// OMP_loop_static -- Time thread spends executing loop iterations from
// a statically scheduled loop
// OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations
// from a statically scheduled loop
// OMP_loop_dynamic -- Time thread spends executing loop iterations from
// a dynamically scheduled loop
// OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations
// from a dynamically scheduled loop
// OMP_critical -- Time thread spends executing critical section
// OMP_critical_wait -- Time thread spends waiting to enter
// a critical section
// OMP_single -- Time spent executing a "single" region
// OMP_master -- Time spent executing a "master" region
// OMP_masked -- Time spent executing a "masked" region
// OMP_task_immediate -- Time spent executing non-deferred tasks
// OMP_task_taskwait -- Time spent executing tasks inside a taskwait
// construct
// OMP_task_taskyield -- Time spent executing tasks inside a taskyield
// construct
// OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup
// construct
// OMP_task_join_bar -- Time spent executing tasks inside a join barrier
// OMP_task_plain_bar -- Time spent executing tasks inside a barrier
// construct
// OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop
// construct
// OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or
// inside implicit barrier at end of worksharing
// construct
// OMP_idle -- Time worker threads spend waiting for next
// parallel region
// OMP_fork_barrier -- Time spent in the fork barrier surrounding a
// parallel region
// OMP_join_barrier -- Time spent in the join barrier surrounding a
// parallel region
// OMP_serial -- Time thread zero spends executing serial code
// OMP_set_numthreads -- Values passed to omp_set_num_threads
// OMP_PARALLEL_args -- Number of arguments passed to a parallel region
// OMP_loop_static_iterations -- Number of iterations thread is assigned for
// statically scheduled loops
// OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for
// dynamically scheduled loops
#if (KMP_DEVELOPER_STATS)
// Timers which are of interest to runtime library developers, not end users.
// These have to be explicitly enabled in addition to the other stats.
// KMP_fork_barrier -- time in __kmp_fork_barrier
// KMP_join_barrier -- time in __kmp_join_barrier
// KMP_barrier -- time in __kmp_barrier
// KMP_end_split_barrier -- time in __kmp_end_split_barrier
// KMP_setup_icv_copy -- time in __kmp_setup_icv_copy
// KMP_icv_copy -- start/stop timer for any ICV copying
// KMP_linear_gather -- time in __kmp_linear_barrier_gather
// KMP_linear_release -- time in __kmp_linear_barrier_release
// KMP_tree_gather -- time in __kmp_tree_barrier_gather
// KMP_tree_release -- time in __kmp_tree_barrier_release
// KMP_hyper_gather -- time in __kmp_hyper_barrier_gather
// KMP_hyper_release -- time in __kmp_hyper_barrier_release
// KMP_dist_gather -- time in __kmp_dist_barrier_gather
// KMP_dist_release -- time in __kmp_dist_barrier_release
// clang-format off
// Developer-only timers, appended to KMP_FOREACH_TIMER when
// KMP_DEVELOPER_STATS is enabled; otherwise the list expands to nothing.
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \
macro(KMP_fork_call, 0, arg) \
macro(KMP_join_call, 0, arg) \
macro(KMP_end_split_barrier, 0, arg) \
macro(KMP_hier_gather, 0, arg) \
macro(KMP_hier_release, 0, arg) \
macro(KMP_hyper_gather, 0, arg) \
macro(KMP_hyper_release, 0, arg) \
macro(KMP_dist_gather, 0, arg) \
macro(KMP_dist_release, 0, arg) \
macro(KMP_linear_gather, 0, arg) \
macro(KMP_linear_release, 0, arg) \
macro(KMP_tree_gather, 0, arg) \
macro(KMP_tree_release, 0, arg) \
macro(USER_resume, 0, arg) \
macro(USER_suspend, 0, arg) \
macro(USER_mwait, 0, arg) \
macro(KMP_allocate_team, 0, arg) \
macro(KMP_setup_icv_copy, 0, arg) \
macro(USER_icv_copy, 0, arg) \
macro (FOR_static_steal_stolen, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (FOR_static_steal_chunks, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg)
#else
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
#endif
// clang-format on
/*!
* \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro.
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE
* BAD THINGS WILL HAPPEN!
*
* \details Explicit timers are ones where we need to allocate a timer itself
* (as well as the accumulated timing statistics). We allocate these on a
* per-thread basis, and explicitly start and stop them. Block timers just
* allocate the timer itself on the stack, and use the destructor to notice
* block exit; they don't need to be defined here. The name here should be the
* same as that of a timer above.
*
* @ingroup STATS_GATHERING
*/
// Explicit timers currently mirror the full timer list one-for-one.
#define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg)
// Expands one X-macro entry into an enumerator: prefix##name,
#define ENUMERATE(name, ignore, prefix) prefix##name,
// The *_LAST enumerator in each enum gives the number of statistics of
// that kind.
enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST };
enum explicit_timer_e {
KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST
};
enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST };
#undef ENUMERATE
/*
* A logarithmic histogram. It accumulates the number of values in each power of
* ten bin. So 1<=x<10, 10<=x<100, ...
* Mostly useful where we have some big outliers and want to see information
* about them.
*/
/*
 * A logarithmic histogram. It accumulates the number of values in each power
 * of ten bin.  So 1<=x<10, 10<=x<100, ...
 * Mostly useful where we have some big outliers and want to see information
 * about them.  Zero-valued samples are counted separately (zeroCount), since
 * zero has no logarithm.
 */
class logHistogram {
  enum {
    numBins = 31, /* Number of powers of 10. If this changes you need to change
                   * the initializer for binMax */
    /*
     * If you want to use this to analyse values that may be less than 1, (for
     * instance times in s), then the logOffset gives you negative powers.
     * In our case here, we're just looking at times in ticks, or counts, so we
     * can never see values with magnitude < 1 (other than zero), so we can set
     * it to 0. As above change the initializer if you change this.
     */
    logOffset = 0
  };
  uint32_t KMP_ALIGN_CACHE zeroCount; // samples exactly equal to zero
  struct {
    uint32_t count; // number of samples in this bin
    double total;   // sum of the samples in this bin
  } bins[numBins];

  static double binMax[numBins]; // upper bound of each bin

#ifdef KMP_DEBUG
  uint64_t _total; // running count of all samples; used only by check()

  // Consistency check: zeroCount plus all bin counts must equal _total.
  void check() const {
    uint64_t t = zeroCount;
    for (int i = 0; i < numBins; i++)
      t += bins[i].count;
    KMP_DEBUG_ASSERT(t == _total);
  }
#else
  void check() const {}
#endif

public:
  logHistogram() { reset(); }

  logHistogram(logHistogram const &o) {
    // Fix: zeroCount was previously not copied, leaving it indeterminate in
    // the copy (and failing check() under KMP_DEBUG once samples were merged).
    zeroCount = o.zeroCount;
    for (int i = 0; i < numBins; i++)
      bins[i] = o.bins[i];
#ifdef KMP_DEBUG
    _total = o._total;
#endif
  }

  // Clear all counts and totals.
  void reset() {
    zeroCount = 0;
    for (int i = 0; i < numBins; i++) {
      bins[i].count = 0;
      bins[i].total = 0;
    }
#ifdef KMP_DEBUG
    _total = 0;
#endif
  }

  // Accessors take an external bin number; logOffset maps it to the internal
  // array index.
  uint32_t count(int b) const { return bins[b + logOffset].count; }
  double total(int b) const { return bins[b + logOffset].total; }

  static uint32_t findBin(double sample);

  // Merge another histogram into this one, bin by bin.
  logHistogram &operator+=(logHistogram const &o) {
    zeroCount += o.zeroCount;
    for (int i = 0; i < numBins; i++) {
      bins[i].count += o.bins[i].count;
      bins[i].total += o.bins[i].total;
    }
#ifdef KMP_DEBUG
    _total += o._total;
    check();
#endif
    return *this;
  }

  void addSample(double sample);
  int minBin() const; // lowest bin containing any samples
  int maxBin() const; // highest bin containing any samples

  std::string format(char) const;
};
// On-line accumulator for min / max / mean / variance of a sample stream
// (Welford's algorithm; see the wikipedia reference at the top of this
// file).  Can optionally feed each sample into a logHistogram as well.
class statistic {
double KMP_ALIGN_CACHE minVal;
double maxVal;
double meanVal;
double m2; // sum of squared deviations from the running mean (Welford)
uint64_t sampleCount;
double offset; // set via setOffset(); presumably applied in addSample() -- confirm in the .cpp
bool collectingHist; // whether samples are also added to hist
logHistogram hist;
public:
statistic(bool doHist = bool(KMP_STATS_HIST)) {
reset();
collectingHist = doHist;
}
statistic(statistic const &o)
: minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2),
sampleCount(o.sampleCount), offset(o.offset),
collectingHist(o.collectingHist), hist(o.hist) {}
// Construct from precomputed summary values; sd is the standard deviation,
// so m2 is reconstructed as sd*sd*sampleCount.
statistic(double minv, double maxv, double meanv, uint64_t sc, double sd)
: minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc),
sampleCount(sc), offset(0.0), collectingHist(false) {}
bool haveHist() const { return collectingHist; }
double getMin() const { return minVal; }
double getMean() const { return meanVal; }
double getMax() const { return maxVal; }
uint64_t getCount() const { return sampleCount; }
// NOTE(review): yields NaN when sampleCount == 0 -- callers must guard.
double getSD() const { return sqrt(m2 / sampleCount); }
double getTotal() const { return sampleCount * meanVal; }
logHistogram const *getHist() const { return &hist; }
void setOffset(double d) { offset = d; }
// Clear the summary values; collectingHist is deliberately left unchanged.
void reset() {
minVal = (std::numeric_limits<double>::max)();
maxVal = -minVal;
meanVal = 0.0;
m2 = 0.0;
sampleCount = 0;
offset = 0.0;
hist.reset();
}
void addSample(double sample);
void scale(double factor);
void scaleDown(double f) { scale(1. / f); }
void forceCount(uint64_t count) { sampleCount = count; }
statistic &operator+=(statistic const &other);
std::string format(char unit, bool total = false) const;
std::string formatHist(char unit) const { return hist.format(unit); }
};
// Metadata for one timer or counter: display name plus stats_flags_e bits.
struct statInfo {
const char *name;
uint32_t flags;
};
// Accumulated statistics for one timer, plus static accessors over the
// per-timer metadata table (display name and stats_flags_e bits).
class timeStat : public statistic {
  static statInfo timerInfo[];

public:
  timeStat() : statistic() {}

  static const char *name(timer_e e) { return timerInfo[e].name; }

  // Flag queries: each tests one stats_flags_e bit of the timer's entry.
  static bool noTotal(timer_e e) {
    return (timerInfo[e].flags & stats_flags_e::noTotal) != 0;
  }
  static bool masterOnly(timer_e e) {
    return (timerInfo[e].flags & stats_flags_e::onlyInMaster) != 0;
  }
  static bool workerOnly(timer_e e) {
    return (timerInfo[e].flags & stats_flags_e::notInMaster) != 0;
  }
  static bool noUnits(timer_e e) {
    return (timerInfo[e].flags & stats_flags_e::noUnits) != 0;
  }
  static bool logEvent(timer_e e) {
    return (timerInfo[e].flags & stats_flags_e::logEvent) != 0;
  }

  // Strip the logEvent bit from every timer's flags.
  static void clearEventFlags() {
    for (int i = 0; i < TIMER_LAST; ++i)
      timerInfo[i].flags &= ~stats_flags_e::logEvent;
  }
};
// Where we need explicitly to start and end the timer, this version can be used
// Since these timers normally aren't nicely scoped, so don't have a good place
// to live on the stack of the thread, they're more work to use.
// A timer that must be started and stopped explicitly because it has no
// convenient scope to hook; per the list-class comment below, it updates its
// timeStat on every stop() call.  Supports pause/resume so paused intervals
// can be accumulated separately.
class explicitTimer {
timeStat *stat; // statistic updated by stop()
timer_e timerEnumValue; // which timer this is
tsc_tick_count startTime;
tsc_tick_count pauseStartTime; // tick at which the current pause began
tsc_tick_count::tsc_interval_t totalPauseTime; // accumulated paused time
public:
explicitTimer(timeStat *s, timer_e te)
: stat(s), timerEnumValue(te), startTime(), pauseStartTime(0),
totalPauseTime() {}
// void setStat(timeStat *s) { stat = s; }
void start(tsc_tick_count tick);
// NOTE(review): resume() without a matching pause() would add a stale
// interval -- callers must pair pause()/resume().
void pause(tsc_tick_count tick) { pauseStartTime = tick; }
void resume(tsc_tick_count tick) {
totalPauseTime += (tick - pauseStartTime);
}
void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr);
// Zero all timing state.
void reset() {
startTime = 0;
pauseStartTime = 0;
totalPauseTime = 0;
}
timer_e get_type() const { return timerEnumValue; }
};
// Where you need to partition a threads clock ticks into separate states
// e.g., a partitionedTimers class with two timers of EXECUTING_TASK, and
// DOING_NOTHING would render these conditions:
// time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive
// No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice
// versa
// Partitions a thread's clock ticks into separate, non-overlapping states
// (see the comment above).  Implemented as a stack of explicitTimers; the
// method bodies are defined out of line.
class partitionedTimers {
private:
std::vector<explicitTimer> timer_stack;
public:
partitionedTimers();
void init(explicitTimer timer);
void exchange(explicitTimer timer);
void push(explicitTimer timer);
void pop();
void windup();
};
// Special wrapper around the partitioned timers to aid timing code blocks
// It avoids the need to have an explicit end, leaving the scope suffices.
// RAII wrapper around partitionedTimers: pushes the timer on construction and
// pops it when the enclosing scope exits, so no explicit end call is needed.
class blockPartitionedTimer {
partitionedTimers *part_timers;
public:
blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer)
: part_timers(pt) {
part_timers->push(timer);
}
~blockPartitionedTimer() { part_timers->pop(); }
};
// Special wrapper around the thread state to aid in keeping state in code
// blocks It avoids the need to have an explicit end, leaving the scope
// suffices.
// RAII wrapper for the thread's stats state: installs new_state on
// construction and restores the previous state when the scope exits.
class blockThreadState {
stats_state_e *state_pointer;
stats_state_e old_state; // state restored by the destructor
public:
blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state)
: state_pointer(thread_state_pointer), old_state(*thread_state_pointer) {
*state_pointer = new_state;
}
~blockThreadState() { *state_pointer = old_state; }
};
// If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at
// program exit.
// A simple per-thread event counter.  Per the comment above, the individual
// per-thread counts are aggregated into a statistic at program exit.
class counter {
  uint64_t value = 0;
  static const statInfo counterInfo[];

public:
  counter() {}
  void increment() { ++value; }
  uint64_t getValue() const { return value; }
  void reset() { value = 0; }

  // Metadata lookups into the shared counterInfo table.
  static const char *name(counter_e e) { return counterInfo[e].name; }
  static bool masterOnly(counter_e e) {
    return (counterInfo[e].flags & stats_flags_e::onlyInMaster) != 0;
  }
};
/* ****************************************************************
Class to implement an event
There are four components to an event: start time, stop time
nest_level, and timer_name.
The start and stop time should be obvious (recorded in clock ticks).
The nest_level relates to the bar width in the timeline graph.
The timer_name is used to determine which timer event triggered this event.
the interface to this class is through four read-only operations:
1) getStart() -- returns the start time as 64 bit integer
2) getStop() -- returns the stop time as 64 bit integer
3) getNestLevel() -- returns the nest level of the event
4) getTimerName() -- returns the timer name that triggered event
*MORE ON NEST_LEVEL*
The nest level is used in the bar graph that represents the timeline.
Its main purpose is for showing how events are nested inside each other.
For example, say events, A, B, and C are recorded. If the timeline
looks like this:
Begin -------------------------------------------------------------> Time
| | | | | |
A B C C B A
start start start end end end
Then A, B, C will have a nest level of 1, 2, 3 respectively.
These values are then used to calculate the barwidth so you can
see that inside A, B has occurred, and inside B, C has occurred.
Currently, this is shown with A's bar width being larger than B's
bar width, and B's bar width being larger than C's bar width.
**************************************************************** */
// One timeline event: a [start, stop] interval in clock ticks, the nesting
// depth used for bar width in the timeline plot, and the timer that
// produced the event (see the block comment above).
class kmp_stats_event {
  uint64_t start = 0;
  uint64_t stop = 0;
  int nest_level = 0;
  timer_e timer_name = TIMER_LAST;

public:
  kmp_stats_event() {}
  kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme)
      : start(strt), stop(stp), nest_level(nst), timer_name(nme) {}

  // Read-only accessors (see the interface notes above).
  inline uint64_t getStart() const { return start; }
  inline uint64_t getStop() const { return stop; }
  inline int getNestLevel() const { return nest_level; }
  inline timer_e getTimerName() const { return timer_name; }
};
/* ****************************************************************
Class to implement a dynamically expandable array of events
---------------------------------------------------------
| event 1 | event 2 | event 3 | event 4 | ... | event N |
---------------------------------------------------------
An event is pushed onto the back of this array at every
explicitTimer->stop() call. The event records the thread #,
start time, stop time, and nest level related to the bar width.
The event vector starts at size INIT_SIZE and grows (doubles in size)
if needed. An implication of this behavior is that log(N)
reallocations are needed (where N is number of events). If you want
to avoid reallocations, then set INIT_SIZE to a large value.
the interface to this class is through six operations:
1) reset() -- sets the internal_size back to 0 but does not deallocate any
memory
2) size() -- returns the number of valid elements in the vector
3) push_back(start, stop, nest, timer_name) -- pushes an event onto
the back of the array
4) deallocate() -- frees all memory associated with the vector
5) sort() -- sorts the vector by start time
6) operator[index] or at(index) -- returns event reference at that index
**************************************************************** */
// Dynamically expandable array of kmp_stats_event (see the block comment
// above).  Grows by doubling.  NOTE: the destructor intentionally does NOT
// free the storage; callers must invoke deallocate() explicitly.
class kmp_stats_event_vector {
kmp_stats_event *events; // backing array, owned until deallocate()
int internal_size; // number of valid events
int allocated_size; // capacity of the events array
static const int INIT_SIZE = 1024;
public:
kmp_stats_event_vector() {
events =
(kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE);
internal_size = 0;
allocated_size = INIT_SIZE;
}
~kmp_stats_event_vector() {}
inline void reset() { internal_size = 0; }
inline int size() const { return internal_size; }
// Append an event, doubling the backing array first when it is full.
void push_back(uint64_t start_time, uint64_t stop_time, int nest_level,
timer_e name) {
int i;
if (internal_size == allocated_size) {
kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate(
sizeof(kmp_stats_event) * allocated_size * 2);
for (i = 0; i < internal_size; i++)
tmp[i] = events[i];
__kmp_free(events);
events = tmp;
allocated_size *= 2;
}
events[internal_size] =
kmp_stats_event(start_time, stop_time, nest_level, name);
internal_size++;
return;
}
void deallocate(); // frees the events array
void sort(); // sorts the events by start time (see the class comment)
const kmp_stats_event &operator[](int index) const { return events[index]; }
kmp_stats_event &operator[](int index) { return events[index]; }
const kmp_stats_event &at(int index) const { return events[index]; }
kmp_stats_event &at(int index) { return events[index]; }
};
/* ****************************************************************
Class to implement a doubly-linked, circular, statistics list
|---| ---> |---| ---> |---| ---> |---| ---> ... next
| | | | | | | |
|---| <--- |---| <--- |---| <--- |---| <--- ... prev
Sentinel first second third
Node node node node
The Sentinel Node is the user handle on the list.
The first node corresponds to thread 0's statistics.
The second node corresponds to thread 1's statistics and so on...
Each node has a _timers, _counters, and _explicitTimers array to hold that
thread's statistics. The _explicitTimers point to the correct _timer and
update its statistics at every stop() call. The explicitTimers' pointers are
set up in the constructor. Each node also has an event vector to hold that
thread's timing events. The event vector expands as necessary and records
the start-stop times for each timer.
The nestLevel variable is for plotting events and is related
to the bar width in the timeline graph.
Every thread will have a thread local pointer to its node in
the list. The sentinel node is used by the primary thread to
store "dummy" statistics before __kmp_create_worker() is called.
**************************************************************** */
// Node in the doubly-linked, circular, per-thread statistics list described
// in the block comment above.  One node per thread, plus the sentinel node
// that heads the list.
class kmp_stats_list {
  int gtid; // global thread id of the owning thread
  timeStat _timers[TIMER_LAST + 1];
  counter _counters[COUNTER_LAST + 1];
  explicitTimer thread_life_timer; // whole-life timer (OMP_worker_thread_life)
  partitionedTimers _partitionedTimers;
  int _nestLevel; // one per thread
  kmp_stats_event_vector _event_vector;
  kmp_stats_list *next;
  kmp_stats_list *prev;
  stats_state_e state;
  int thread_is_idle_flag;

public:
  kmp_stats_list()
      : gtid(0), // fix: was left indeterminate, so getGtid() on a node that
                 // never had setGtid() called read an uninitialized int (UB)
        thread_life_timer(&_timers[TIMER_OMP_worker_thread_life],
                          TIMER_OMP_worker_thread_life),
        _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE),
        thread_is_idle_flag(0) {}
  ~kmp_stats_list() {}

  // Accessors for this thread's statistics storage.
  inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; }
  inline counter *getCounter(counter_e idx) { return &_counters[idx]; }
  inline partitionedTimers *getPartitionedTimers() {
    return &_partitionedTimers;
  }
  inline timeStat *getTimers() { return _timers; }
  inline counter *getCounters() { return _counters; }
  inline kmp_stats_event_vector &getEventVector() { return _event_vector; }

  // Start/stop the whole-life timer for this thread.
  inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); }
  inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); }

  inline void resetEventVector() { _event_vector.reset(); }

  // Nest level bookkeeping for the timeline plot (see kmp_stats_event).
  inline void incrementNestValue() { _nestLevel++; }
  inline int getNestValue() { return _nestLevel; }
  inline void decrementNestValue() { _nestLevel--; }

  inline int getGtid() const { return gtid; }
  inline void setGtid(int newgtid) { gtid = newgtid; }

  inline void setState(stats_state_e newstate) { state = newstate; }
  inline stats_state_e getState() const { return state; }
  inline stats_state_e *getStatePointer() { return &state; }

  inline bool isIdle() { return thread_is_idle_flag == 1; }
  inline void setIdleFlag() { thread_is_idle_flag = 1; }
  inline void resetIdleFlag() { thread_is_idle_flag = 0; }

  kmp_stats_list *push_back(int gtid); // returns newly created list node

  inline void push_event(uint64_t start_time, uint64_t stop_time,
                         int nest_level, timer_e name) {
    _event_vector.push_back(start_time, stop_time, nest_level, name);
  }

  void deallocate();

  // Minimal bidirectional iterator over the circular list.
  class iterator;
  kmp_stats_list::iterator begin();
  kmp_stats_list::iterator end();
  int size();
  class iterator {
    kmp_stats_list *ptr;
    friend kmp_stats_list::iterator kmp_stats_list::begin();
    friend kmp_stats_list::iterator kmp_stats_list::end();

  public:
    iterator();
    ~iterator();
    iterator operator++();
    iterator operator++(int dummy);
    iterator operator--();
    iterator operator--(int dummy);
    bool operator!=(const iterator &rhs);
    bool operator==(const iterator &rhs);
    kmp_stats_list *operator*() const; // dereference operator
  };
};
/* ****************************************************************
Class to encapsulate all output functions and the environment variables
This module holds filenames for various outputs (normal stats, events, plot
file), as well as coloring information for the plot file.
The filenames and flags variables are read from environment variables.
These are read once by the constructor of the global variable
__kmp_stats_output which calls init().
During this init() call, event flags for the timeStat::timerInfo[] global
array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes).
The only interface function that is public is outputStats(heading). This
function should print out everything it needs to, either to files or stderr,
depending on the environment variables described below
ENVIRONMENT VARIABLES:
KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this
file, otherwise, print to stderr
KMP_STATS_THREADS -- if set to "on", then will print per thread statistics to
either KMP_STATS_FILE or stderr
KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename,
otherwise, the plot file is sent to "events.plt"
KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log
events
KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file,
otherwise, output is sent to "events.dat"
**************************************************************** */
// Encapsulates all statistics output and the KMP_STATS_* environment
// variables (see the block comment above).  Besides construction, the only
// public entry point is outputStats().
class kmp_stats_output_module {
public:
struct rgb_color {
float r;
float g;
float b;
};
private:
std::string outputFileName;
static const char *eventsFileName; // KMP_STATS_EVENTS_FILE or "events.dat"
static const char *plotFileName; // KMP_STATS_PLOT_FILE or "events.plt"
static int printPerThreadFlag; // from KMP_STATS_THREADS
static int printPerThreadEventsFlag; // from KMP_STATS_EVENTS
static const rgb_color globalColorArray[];
static rgb_color timerColorInfo[];
void init(); // reads environment variables; called from the constructor
static void setupEventColors();
static void printPloticusFile();
static void printHeaderInfo(FILE *statsOut);
static void printTimerStats(FILE *statsOut, statistic const *theStats,
statistic const *totalStats);
static void printCounterStats(FILE *statsOut, statistic const *theStats);
static void printCounters(FILE *statsOut, counter const *theCounters);
static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents,
int gtid);
static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; }
static void windupExplicitTimers();
bool eventPrintingEnabled() const { return printPerThreadEventsFlag; }
public:
kmp_stats_output_module() { init(); }
void outputStats(const char *heading);
};
#ifdef __cplusplus
extern "C" {
#endif
void __kmp_stats_init();
void __kmp_stats_fini();
void __kmp_reset_stats();
void __kmp_output_stats(const char *);
void __kmp_accumulate_stats_at_exit(void);
// thread local pointer to stats node within list
extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr;
// head to stats list.
extern kmp_stats_list *__kmp_stats_list;
// lock for __kmp_stats_list
extern kmp_tas_lock_t __kmp_stats_lock;
// reference start time
extern tsc_tick_count __kmp_stats_start_time;
// interface to output
extern kmp_stats_output_module __kmp_stats_output;
#ifdef __cplusplus
}
#endif
// Simple, standard interfaces that drop out completely if stats aren't enabled
/*!
* \brief Adds value to specified timer (name).
*
* @param name timer name as specified under the KMP_FOREACH_TIMER() macro
* @param value double precision sample value to add to statistics for the timer
*
* \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to
* a timer statistics.
*
* @ingroup STATS_GATHERING
*/
#define KMP_COUNT_VALUE(name, value) \
__kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample((double)value)
/*!
* \brief Increments specified counter (name).
*
* @param name counter name as specified under the KMP_FOREACH_COUNTER() macro
*
* \details Use KMP_COUNT_BLOCK(name, value) macro to increment a statistics
* counter for the executing thread.
*
* @ingroup STATS_GATHERING
*/
#define KMP_COUNT_BLOCK(name) \
__kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment()
/*!
* \brief Outputs the current thread statistics and reset them.
*
* @param heading_string heading put above the final stats output
*
* \details Explicitly stops all timers and outputs all stats. Environment
* variable, `OMPTB_STATSFILE=filename`, can be used to output the stats to a
* filename instead of stderr. Environment variable,
* `OMPTB_STATSTHREADS=true|undefined`, can be used to output thread specific
* stats. For now the `OMPTB_STATSTHREADS` environment variable can either be
* defined with any value, which will print out thread specific stats, or it can
* be undefined (not specified in the environment) and thread specific stats
* won't be printed. It should be noted that all statistics are reset when this
* macro is called.
*
* @ingroup STATS_GATHERING
*/
#define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string)
/*!
* \brief Initializes the partitioned timers to begin with name.
*
* @param name timer which you want this thread to begin with
*
* @ingroup STATS_GATHERING
*/
// Make timer `name` the root of this thread's partitioned-timer stack.
#define KMP_INIT_PARTITIONED_TIMERS(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Time the enclosing scope: declares a local blockPartitionedTimer object
// covering the scope (the timer presumably stops when the object goes out
// of scope — scope-based timing).
#define KMP_TIME_PARTITIONED_BLOCK(name) \
  blockPartitionedTimer __PBLOCKTIME__( \
      __kmp_stats_thread_ptr->getPartitionedTimers(), \
      explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name), \
                    TIMER_##name))
// Push timer `name` onto this thread's partitioned-timer stack.
#define KMP_PUSH_PARTITIONED_TIMER(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Pop the top timer off this thread's partitioned-timer stack.
#define KMP_POP_PARTITIONED_TIMER() \
  __kmp_stats_thread_ptr->getPartitionedTimers()->pop()
// Replace the top of the partitioned-timer stack with timer `name`.
#define KMP_EXCHANGE_PARTITIONED_TIMER(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Set / query the executing thread's recorded state.
#define KMP_SET_THREAD_STATE(state_name) \
  __kmp_stats_thread_ptr->setState(state_name)
#define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState()
// Set the thread state for the enclosing scope only (restored on scope exit
// via the local blockThreadState object).
#define KMP_SET_THREAD_STATE_BLOCK(state_name) \
  blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \
                                    state_name)
/*!
* \brief resets all stats (counters to 0, timers to 0 elapsed ticks)
*
* \details Reset all stats for all threads.
*
* @ingroup STATS_GATHERING
*/
#define KMP_RESET_STATS() __kmp_reset_stats()
#if (KMP_DEVELOPER_STATS)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v)
#define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) \
KMP_EXCHANGE_PARTITIONED_TIMER(n)
#else
// Null definitions
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#endif
#else // KMP_STATS_ENABLED
// Null definitions
#define KMP_COUNT_VALUE(n, v) ((void)0)
#define KMP_COUNT_BLOCK(n) ((void)0)
#define KMP_OUTPUT_STATS(heading_string) ((void)0)
#define KMP_RESET_STATS() ((void)0)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0)
#define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0)
#define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0)
#define KMP_POP_PARTITIONED_TIMER() ((void)0)
#define KMP_SET_THREAD_STATE(state_name) ((void)0)
#define KMP_GET_THREAD_STATE() ((void)0)
#define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0)
#endif // KMP_STATS_ENABLED
#endif // KMP_STATS_H
|
vms_fmt_plug.c | /*
* This file is part of John the Ripper password cracker.
*
* It comes from OpenVMS support 2.4(jtr_vms_2-4.zip) patch
* posted by David Jones.
*
* Converted to OpenVMS format module by David Jones
*
* Copyright (c) 2011 by David L. Jones <jonesd/at/columbus.rr.com>,
* Copyright (c) 2012 by Dhiru Kholia <dhiru/at/openwall.com> and
* is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modifications, are permitted. */
#if !AC_BUILT
#if __GNUC__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define ARCH_LITTLE_ENDIAN 1
#endif
#endif
#if FMT_EXTERNS_H
#if ARCH_LITTLE_ENDIAN
extern struct fmt_main fmt_VMS;
#endif
#elif FMT_REGISTERS_H
#if ARCH_LITTLE_ENDIAN
john_register_one(&fmt_VMS);
#endif
#else
#include <stdio.h>
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "vms_std.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // Tuned on K8-Dual HT
#endif
#endif
#ifdef VMS
#include <ssdef.h>
#define UAIsM_PWDMIX UAI$M_PWDMIX
#else
/*
* Emulate symbols defined for VMS services.
*/
#define UAIsM_PWDMIX 0x2000000
#endif
#include "memdbg.h"
#define FORMAT_LABEL "OpenVMS"
#define FORMAT_NAME "Purdy"
#define FORMAT_TAG "$V$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH UAF_ENCODE_SIZE
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct uaf_hash_info)
#define SALT_ALIGN sizeof(uaf_qword)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
/*
* The following two test vectors: "USER" and "service" are case-insensitive
*/
{"$V$9AYXUd5LfDy-aj48Vj54P-----", "USER"},
{"$V$p1UQjRZKulr-Z25g5lJ-------", "service"},
/*
* The following one test vector: "President#44" is case-sensitive
*/
{"$V$S44zI913bBx-UJrcFSC------D", "President#44"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uaf_qword (*crypt_out)[BINARY_SIZE / sizeof(uaf_qword)];
static int initialized;
/*
 * See if signature of ciphertext (from passwd file) matches the hash
 * produced by the uaf_encode routine (starts with $V$)
 */
/*
 * Validate a candidate ciphertext: returns 1 if it is a well-formed
 * OpenVMS Purdy hash, 0 otherwise.  Checks, in order: the "$V$" tag,
 * a minimum encoded length, decodability by uaf_hash_decode(), and an
 * algorithm number in the supported range 1..3.
 */
static int valid(char *ciphertext, struct fmt_main *self )
{
struct uaf_hash_info pwd;
/* Lazily initialize the UAF library; valid() can be reached before
   fmt_vms_init() runs. */
if (!initialized) {
uaf_init();
initialized = 1;
}
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0; /* no match */
/* Too short to hold a full encoded hash. */
if ( strlen ( ciphertext ) < (UAF_ENCODE_SIZE-1) )
return 0;
if (!uaf_hash_decode(ciphertext, &pwd))
return 0;
#ifdef VMS_DEBUG
fprintf(stderr, "/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x"
" %ld\n", ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0],
pwd.username.r40[1], pwd.username.r40[2], pwd.username.r40[3],
pwd.flags);
#endif
/* Only Purdy algorithm variants 1..3 are supported. */
if (pwd.alg < 1 || pwd.alg > 3)
return 0;
return 1;
}
/*
 * Format init hook (jumbo interface): scales the keys-per-crypt limits
 * for OpenMP, allocates the per-key input/output buffers, and makes sure
 * the UAF library is initialized.
 */
static void fmt_vms_init ( struct fmt_main *self )
{
#ifdef _OPENMP
/* Scale min by the thread count and max by threads * OMP_SCALE so each
   thread gets a large enough chunk of keys per crypt_all() call. */
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* Allocate zeroed per-key buffers for plaintexts and computed hashes
   (freed in done()). */
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
/* uaf_init() may already have run from valid(); guard with the flag. */
if (!initialized) {
uaf_init();
initialized = 1;
}
}
/* Format teardown hook: release the per-key buffers allocated in
   fmt_vms_init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/*
* Save a password (key) for testing. VMS_std_set_key returns position value
* we can use if needed to recall the key by a fmt->get_key request. On get_key
* return a private copy.
*/
/* Store candidate password `key` at slot `index`; strnzcpyn() bounds the
   copy to the buffer size and guarantees NUL termination. */
static void set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for slot `index` (pointer into the
   saved_key array; caller must not free it). */
static char *get_key(int index)
{
return saved_key[index];
}
/*
 * Quick screen: does any of the `count` computed hashes match `binary`?
 * Only the first ARCH_SIZE bytes (one native word) are compared here as a
 * fast partial check; cmp_one() performs the full BINARY_SIZE comparison.
 * Without OpenMP the loop header is compiled out and only index 0 is
 * checked, which is sufficient since MAX_KEYS_PER_CRYPT is 1 (it is only
 * scaled up under _OPENMP in fmt_vms_init()).
 */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full BINARY_SIZE comparison of the hash computed for slot `index`
   against the reference binary. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/*
 * Final exact-match hook of the format interface.  cmp_one() already
 * compares the full BINARY_SIZE hash, so there is nothing further to
 * verify here; unconditionally report a match.
 */
static int cmp_exact(char *source, int index)
{
(void)source; /* unused */
(void)index;  /* unused */
return 1;
}
/*
* Save salt for producing ciphertext from it and saved keys at next crypt call.
*/
/* Currently active salt; set by VMS_std_set_salt() and consumed by
   VMS_std_crypt(). */
static struct uaf_hash_info *cur_salt;
/* Format set_salt hook: remember the decoded salt for the next crypt call.
   The pointed-to storage is owned by the caller and must stay valid until
   the next set_salt. */
void VMS_std_set_salt ( void *salt )
{
cur_salt = (struct uaf_hash_info*)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/*
* Hash the password and salt saved with VMS_std_set_key and VMS_std_set_salt,
* saving the result in global storage for retrieval by vms_fmt.c module.
*/
/*
 * crypt_all hook: hash each saved key against cur_salt, storing results in
 * crypt_out[].  With OpenMP the loop is parallel over the key indices;
 * without OpenMP the loop header is compiled out and only index 0 is
 * hashed, which matches MAX_KEYS_PER_CRYPT == 1 in that configuration.
 * Returns the (unchanged) number of keys processed.
 */
int VMS_std_crypt(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
uaf_test_password (cur_salt, saved_key[index], 0, crypt_out[index]);
}
return count;
}
/*
* Extract salt from ciphertext string to static storage and return
* pointer to it. Salt is effectively 70-80 bits (username, salt,
* algorithm, pwdmix flag).
*/
char *VMS_std_get_salt(char *ciphertext)
{
/* Static buffer: the format core copies SALT_SIZE bytes out of the
   returned pointer, so the result is only valid until the next call
   (not reentrant / not thread-safe, which is fine for get_salt). */
static struct uaf_hash_info pwd;
/* Zero first so any padding bytes compare equal across identical salts. */
memset(&pwd, 0, sizeof(pwd));
uaf_hash_decode ( ciphertext, &pwd );
#ifdef VMS_DEBUG
printf("/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x %ld\n",
ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0], pwd.username.r40[1],
pwd.username.r40[2], pwd.username.r40[3], pwd.flags );
#endif
return (char *) &pwd;
}
/*
* Extract binary hash from ciphertext into static storage and return
* pointer to it.
*/
VMS_word *VMS_std_get_binary(char *ciphertext)
{
/* Static union so the decoded uaf_hash_info can be returned as raw words;
   valid only until the next call. */
static union {
struct uaf_hash_info pwd;
VMS_word b[16];
} out;
uaf_hash_decode ( ciphertext, &out.pwd );
return out.b;
}
/*
* Class record.
*/
struct fmt_main fmt_VMS = {
{
FORMAT_LABEL, /* .label */
FORMAT_NAME, /* .format_name */
VMS_ALGORITHM_NAME, /* .algorithm_name */
BENCHMARK_COMMENT, /* .benchmark_comment */
BENCHMARK_LENGTH, /* .benchmark_length (pwd break len) */
0, /* minimum plaintext length — TODO(review): confirm field name */
PLAINTEXT_LENGTH, /* .plaintext_length (max) */
BINARY_SIZE, /* .binary_size (quadword) */
BINARY_ALIGN,
SALT_SIZE, /* .salt_size (word) */
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
/*
* This format supports both case-sensitive and case-insensitive passwords,
* so this format should set FMT_CASE
*/
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL }, /* no tunable costs */
{ FORMAT_TAG }, /* recognized ciphertext signatures */
tests
}, {
fmt_vms_init, /* changed for jumbo */
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
(void *(*)(char *))VMS_std_get_binary,
(void *(*)(char *))VMS_std_get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL, /* presumably salt_compare (unused) — confirm against formats.h */
(void (*)(void *))VMS_std_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
VMS_std_crypt,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
NoBorderFilterReferenceImpl.c | // Authors:
// Emanuele Del Sozzo (emanuele.delsozzo@polimi.it), Marcello Pogliani (marcello.pogliani@polimi.it)
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include "NoBorderFilterReferenceImpl.h"
#include "omp.h"
/*
 * OpenMP-parallel 2D convolution of src (dimx x dimy, row-major: element
 * (x, y) lives at x * dimy + y) with a Kdim x Kdim Kernel, writing into
 * dst.  Pixels within Kdim/2 of any edge are never written — hence the
 * "no border" filter.  The outer (x) loop is parallelized; `sum` is made
 * thread-private and re-zeroed per pixel.
 *
 * NOTE(review): the kernel loops run k, j in [-start, start), so the last
 * row/column of Kernel is never read when Kdim is odd (for even Kdim the
 * range covers the kernel exactly).  This looks like a possible
 * off-by-one for odd kernel sizes, but the sequential reference below
 * behaves identically, so confirm intent before changing either.
 */
void noBorderFilterCPUParallel(input_type src[], output_type dst[], size_t dimx, size_t dimy, output_type Kernel[], size_t Kdim)
{
output_type sum = 0;
int start = (int) (Kdim) / 2; /* half-width of the kernel window */
#pragma omp parallel for private(sum)
for (size_t x = start; x < (dimx - start); x++) {
for (size_t y = start; y < (dimy - start); y++) {
sum = 0;
for (int k = -start; k < start; k++) {
for (int j = -start; j < start; j++) {
sum = sum + Kernel[(int)(k + start) * Kdim + (int) (j + start)] * (output_type) src[(int)(x - k) * dimy + y - j];
}
}
dst[x * dimy + y] = sum;
}
}
}
/*
 * Sequential reference implementation of the "no border" 2D convolution;
 * identical arithmetic to noBorderFilterCPUParallel() (same [-start, start)
 * kernel window — see the NOTE there about odd Kdim), used as the golden
 * model.  Unlike the parallel version it iterates y in the outer loop and
 * x inside, so the inner loop strides by dimy elements through src/dst
 * rather than accessing them contiguously.
 */
void noBorderFilterCPUReferenceImpl(input_type src[], output_type dst[], size_t dimx, size_t dimy, output_type Kernel[], size_t Kdim)
{
output_type sum = 0;
int start = (int) (Kdim) / 2; /* half-width of the kernel window */
for (size_t y = start; y < (dimy - start); y++) {
for (size_t x = start; x < (dimx - start); x++) {
sum = 0;
for (int k = -start; k < start; k++) {
for (int j = -start; j < start; j++) {
sum = sum + Kernel[(int)(k + start) * Kdim + (int) (j + start)] * (output_type) src[(int)(x - k) * dimy + y - j];
}
}
dst[x * dimy + y] = sum;
}
}
}
|
Tutorial.h | //=================================================================================================
/*!
// \file blaze/Tutorial.h
// \brief Tutorial of the Blaze library
//
// Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_TUTORIAL_H_
#define _BLAZE_TUTORIAL_H_
//=================================================================================================
//
// BLAZE TUTORIAL
//
//=================================================================================================
//**Mainpage***************************************************************************************
/*!\mainpage
//
// \image html blaze300x150.jpg
//
// This is the API for the \b Blaze high performance C++ math library. It gives a complete
// overview of the individual features and sublibraries of \b Blaze. To get a first impression
// on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards,
// the following long tutorial covers the most important aspects of the \b Blaze math library.
// The tabs at the top of the page allow a direct access to the individual modules, namespaces,
// classes, and files of the \b Blaze library.\n\n
//
// \section table_of_content Table of Contents
//
// <ul>
// <li> \ref configuration_and_installation </li>
// <li> \ref getting_started </li>
// <li> \ref vectors
// <ul>
// <li> \ref vector_types
// <ul>
// <li> \ref vector_types_dense_vectors </li>
// <li> \ref vector_types_sparse_vectors </li>
// </ul>
// </li>
// <li> \ref vector_operations
// <ul>
// <li> \ref vector_operations_constructors </li>
// <li> \ref vector_operations_assignment </li>
// <li> \ref vector_operations_element_access </li>
// <li> \ref vector_operations_element_insertion </li>
// <li> \ref vector_operations_element_removal </li>
// <li> \ref vector_operations_element_lookup </li>
// <li> \ref vector_operations_non_modifying_operations </li>
// <li> \ref vector_operations_modifying_operations </li>
// <li> \ref vector_operations_arithmetic_operations </li>
// <li> \ref vector_operations_reduction_operations </li>
// <li> \ref vector_operations_norms </li>
// <li> \ref vector_operations_scalar_expansion </li>
// <li> \ref vector_operations_vector_expansion </li>
// <li> \ref vector_operations_vector_repetition </li>
// <li> \ref vector_operations_statistic_operations </li>
// <li> \ref vector_operations_declaration_operations </li>
// <li> \ref vector_operations_vector_generators </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref matrices
// <ul>
// <li> \ref matrix_types
// <ul>
// <li> \ref matrix_types_dense_matrices </li>
// <li> \ref matrix_types_sparse_matrices </li>
// </ul>
// </li>
// <li> \ref matrix_operations
// <ul>
// <li> \ref matrix_operations_constructors </li>
// <li> \ref matrix_operations_assignment </li>
// <li> \ref matrix_operations_element_access </li>
// <li> \ref matrix_operations_element_insertion </li>
// <li> \ref matrix_operations_element_removal </li>
// <li> \ref matrix_operations_element_lookup </li>
// <li> \ref matrix_operations_non_modifying_operations </li>
// <li> \ref matrix_operations_modifying_operations </li>
// <li> \ref matrix_operations_arithmetic_operations </li>
// <li> \ref matrix_operations_reduction_operations </li>
// <li> \ref matrix_operations_norms </li>
// <li> \ref matrix_operations_scalar_expansion </li>
// <li> \ref matrix_operations_matrix_repetition </li>
// <li> \ref matrix_operations_statistic_operations </li>
// <li> \ref matrix_operations_declaration_operations </li>
// <li> \ref matrix_operations_matrix_generators </li>
// <li> \ref matrix_operations_matrix_inversion </li>
// <li> \ref matrix_operations_matrix_exponential </li>
// <li> \ref matrix_operations_decomposition </li>
// <li> \ref matrix_operations_linear_systems </li>
// <li> \ref matrix_operations_eigenvalues </li>
// <li> \ref matrix_operations_singularvalues </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref adaptors
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices </li>
// </ul>
// </li>
// <li> \ref views
// <ul>
// <li> \ref views_subvectors </li>
// <li> \ref views_element_selections </li>
// <li> \ref views_submatrices </li>
// <li> \ref views_rows </li>
// <li> \ref views_row_selections </li>
// <li> \ref views_columns </li>
// <li> \ref views_column_selections </li>
// <li> \ref views_bands </li>
// </ul>
// </li>
// <li> \ref arithmetic_operations
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// <li> \ref vector_kronecker_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// <li> \ref matrix_kronecker_product </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref bitwise_operations
// <ul>
// <li> \ref bitwise_shift </li>
// <li> \ref bitwise_and </li>
// <li> \ref bitwise_or </li>
// <li> \ref bitwise_xor </li>
// </ul>
// </li>
// <li> \ref logical_operations
// <ul>
// <li> \ref logical_not </li>
// <li> \ref logical_and </li>
// <li> \ref logical_or </li>
// </ul>
// </li>
// <li> \ref shared_memory_parallelization
// <ul>
// <li> \ref hpx_parallelization </li>
// <li> \ref cpp_threads_parallelization </li>
// <li> \ref boost_threads_parallelization </li>
// <li> \ref openmp_parallelization </li>
// <li> \ref serial_execution </li>
// </ul>
// </li>
// <li> \ref serialization
// <ul>
// <li> \ref vector_serialization </li>
// <li> \ref matrix_serialization </li>
// </ul>
// </li>
// <li> \ref customization
// <ul>
// <li> \ref configuration_files </li>
// <li> \ref vector_and_matrix_customization
// <ul>
// <li> \ref custom_data_members </li>
// <li> \ref custom_operations </li>
// <li> \ref custom_data_types </li>
// </ul>
// </li>
// <li> \ref grouping_tagging </li>
// <li> \ref error_reporting_customization </li>
// </ul>
// </li>
// <li> \ref blas_functions </li>
// <li> \ref lapack_functions </li>
// <li> \ref block_vectors_and_matrices </li>
// <li> \ref intra_statement_optimization </li>
// <li> \ref faq </li>
// <li> \ref issue_creation_guidelines </li>
// <li> \ref blaze_references </li>
// </ul>
*/
//*************************************************************************************************
//**Configuration and Installation*****************************************************************
/*!\page configuration_and_installation Configuration and Installation
//
// \tableofcontents
//
//
// Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system
// is a fairly easy two step process. In the following, this two step process is explained in
// detail, preceded only by a short summary of the requirements.
//
//
// \n \section requirements Requirements
// <hr>
//
// For maximum performance the \b Blaze library expects you to have a BLAS library installed
// (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>,
// <a href="http://developer.amd.com/libraries/acml/">ACML</a>,
// <a href="http://math-atlas.sourceforge.net">Atlas</a>,
// <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't
// have a BLAS library installed on your system, \b Blaze will still work and will not be reduced
// in functionality, but performance may be limited. Thus it is strongly recommended to install a
// BLAS library.
//
// Additionally, for computing the determinant of a dense matrix, for the decomposition of dense
// matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular
// values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either
// of these features is used it is necessary to link the LAPACK library to the final executable.
// If no LAPACK library is available the use of these features will result in a linker error.
//
// Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this
// case the Boost library is required to be installed on your system. It is recommended to use the
// newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If
// you don't have Boost installed on your system, you can download it for free from
// <a href="http://www.boost.org">www.boost.org</a>.
//
//
// \n \section step_1_installation Step 1: Installation
// <hr>
//
// \subsection step_1_cmake Installation via CMake
//
// The first step is the installation of the \b Blaze header files. The most convenient way
// to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the
// following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to
// the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to
// \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake.
\code
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/
sudo make install
\endcode
// Windows users can do the same via the cmake-gui. Alternatively, it is possible to include
// \b Blaze by adding the following lines in any \c CMakeLists.txt file:
\code
find_package( blaze )
if( blaze_FOUND )
add_library( blaze_target INTERFACE )
target_link_libraries( blaze_target INTERFACE blaze::blaze )
endif()
\endcode
// Alternatively \b Blaze provides the <tt>./cmake/Blaze_Import</tt> CMake function to import
// the \b Blaze library into CMake based projects. This approach includes the configuration
// step (see \ref step_2_configuration). To do so you need to import the function file like
// any other module/function into your CMake project:
\code
list(APPEND CMAKE_MODULE_PATH ${BLAZE_LIBRARY_PATH}/cmake)
include(Blaze_Import)
\endcode
// After importing the function script you can import and use the \b Blaze library:
\code
Blaze_Import(ARGUMENTS)
target_link_libraries(TARGET Blaze)
\endcode
// In this example, \c TARGET is the executable/library using \b Blaze and \c ARGUMENTS is the
// configuration you want for building \b Blaze. To configure \b Blaze using the import function
// you can set the input arguments like this example:
\code
Blaze_Import(
QUIET
BLAS on
LAPACK on
THREADING Boost
CACHE_SIZE auto
VECTORIZATION on
STORAGE_ORDER rowMajor
THRESHOLD_DMATDVECMULT 100000UL
THRESHOLD_SMP_DVECDVECADD 1000000UL
)
\endcode
// For more details about available configuration options please have a look at
// \ref configuration_files and the <tt>Blaze_Import.cmake</tt> function script.
//
// \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool
//
// An alternate way to install \b Blaze for Windows users is Microsoft's
// <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can
// be installed via the command line:
\code
C:\src\vcpkg> .\vcpkg install blaze
\endcode
// The tool automatically downloads the latest \b Blaze release and copies the header files to
// the common include directory. Please note that since \b Blaze is a header-only library the
// attempt to install any static or dynamic library will fail!
//
// \n \subsection step_1_installation_unix Manual Installation on Linux/macOS
//
// Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply
// copied to a standard include directory (note that this requires root privileges):
\code
cp -r ./blaze /usr/local/include
\endcode
// Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the
// \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be
// searched after any directories specified on the command line with the option \c -I and
// before the standard default directories (such as \c /usr/local/include and \c /usr/include).
// Assuming a user named 'Jon', the environment variable can be set as follows:
\code
CPLUS_INCLUDE_PATH=/usr/home/jon/blaze
export CPLUS_INCLUDE_PATH
\endcode
// Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the
// command line. The following example demonstrates this by means of the GNU C++ compiler:
\code
g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp
\endcode
// \n \subsection step_1_installation_windows Manual Installation on Windows
//
// Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be
// copied to any other directory or simply left in the default \b Blaze directory. However, the
// chosen include directory has to be explicitly specified as include path. In Visual Studio,
// this is done via the project property pages, configuration properties, C/C++, General settings.
// Here the additional include directories can be specified.
//
//
// \n \section step_2_configuration Step 2: Configuration
// <hr>
//
// The second step is the configuration and customization of the \b Blaze library. Many aspects
// of \b Blaze can be adapted to specific requirements, environments and architectures. The most
// convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt>
// subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header
// files can be customized manually. In both cases, however, the files are modified. If this is
// not an option it is possible to configure \b Blaze via the command line (see the tutorial
// section \ref configuration_files or the documentation in the configuration files).
//
// Since the default settings are reasonable for most systems this step can also be skipped.
// However, in order to achieve maximum performance a customization of at least the following
// configuration files is required:
//
// - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled
// to use a third-party BLAS library for several basic linear algebra functions (such as for
// instance dense matrix multiplications). In case no BLAS library is used, all linear algebra
// functions use the default implementations of the \b Blaze library and therefore BLAS is not a
// requirement for the compilation process. However, please note that performance may be limited.
// - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache
// settings. \b Blaze uses this information to optimize its cache usage. For maximum performance
// it is recommended to adapt these setting to a specific target architecture.
// - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the
// customization of the \b Blaze compute kernels. In order to tune the kernels for a specific
// architecture and to maximize performance it can be necessary to adjust the thresholds,
// especially for a parallel execution (see \ref shared_memory_parallelization).
//
// For an overview of other customization options and more details, please see the section
// \ref configuration_files.
//
//
// \n \section blaze_version Blaze Version
// <hr>
//
// The current major and minor version number of the \b Blaze library can be found in the
// <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the
// <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros,
// which can for instance be used for conditional compilation:
\code
#define BLAZE_MAJOR_VERSION 3
#define BLAZE_MINOR_VERSION 9
#define BLAZE_PATCH_VERSION 0
\endcode
// \n Next: \ref getting_started
*/
//*************************************************************************************************
//**Getting Started********************************************************************************
/*!\page getting_started Getting Started
//
// This short tutorial serves the purpose to give a quick overview of the way mathematical
// expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following
// long tutorial covers the most important aspects of the \b Blaze math library.
//
//
// \n \section getting_started_vector_example A First Example
//
// \b Blaze is written such that using mathematical expressions is as close to mathematical
// textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly
// easiest solution is the right solution and most users experience no problems when trying to
// use \b Blaze in the most natural way. The following example gives a first impression of the
// formulation of a vector addition in \b Blaze:
\code
#include <iostream>
#include <blaze/Math.h>
using blaze::StaticVector;
using blaze::DynamicVector;
int main()
{
// Instantiation of a static 3D column vector. The vector is directly initialized as
// ( 4 -2 5 )
StaticVector<int,3UL> a{ 4, -2, 5 };
// Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to
// ( 2 5 -3 )
DynamicVector<int> b( 3UL );
b[0] = 2;
b[1] = 5;
b[2] = -3;
// Adding the vectors a and b
DynamicVector<int> c = a + b;
// Printing the result of the vector addition
std::cout << "c =\n" << c << "\n";
}
\endcode
// Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header
// file. Alternatively, the entire \b Blaze library, including both the math and the entire
// utility module, can be included via the \c blaze/Blaze.h header file. Also note that all
// classes and functions of \b Blaze are contained in the blaze namespace.\n\n
//
// Assuming that this program resides in a source file called \c FirstExample.cpp, it can be
// compiled for instance via the GNU C++ compiler:
\code
g++ -std=c++14 -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp
\endcode
// Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum
// performance, it is necessary to compile the program in release mode, which deactivates
// all debugging functionality inside \b Blaze. It is also strongly recommended to specify
// the available architecture specific instruction set (as for instance the AVX instruction
// set, which if available can be activated via the \c -mavx flag). This allows \b Blaze
// to optimize computations via vectorization.\n\n
//
// When running the resulting executable \c FirstExample, the output of the last line of
// this small program is
\code
c =
( 6 )
( 3 )
( 2 )
\endcode
// \n \section getting_started_matrix_example An Example Involving Matrices
//
// Similarly easy and intuitive are expressions involving matrices:
\code
#include <iostream>
#include <blaze/Math.h>
using namespace blaze;
int main()
{
// Instantiating a dynamic 3D column vector
DynamicVector<int> x{ 4, -1, 3 };
// Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1;
A(0,2) = 4;
A(1,1) = -2;
// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;
// Printing the resulting vector
std::cout << "y =\n" << y << "\n";
// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } };
// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;
// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
}
\endcode
// The output of this program is
\code
y =
( 16 )
( 2 )
C =
( -1 -1 )
( 0 -4 )
\endcode
// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:
\code
#include <blaze/Math.h>
int main()
{
const size_t N ( 1000UL );
const size_t iterations( 10UL );
const size_t NN( N*N );
blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;
// ... Initializing the sparse matrix A
// Performing the CG algorithm
r = b - A * x;
p = r;
delta = (r,r);
for( size_t iteration=0UL; iteration<iterations; ++iteration )
{
Ap = A * p;
alpha = delta / (p,Ap);
x += alpha * p;
r -= alpha * Ap;
beta = (r,r);
if( std::sqrt( beta ) < 1E-8 ) break;
p = r + ( beta / delta ) * p;
delta = beta;
}
}
\endcode
// \n Hopefully this short tutorial gives a good first impression of how mathematical expressions
// are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types,
// will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and
// matrix types, all possible operations on vectors and matrices, and of course all possible
// mathematical expressions.
//
// \n Previous: \ref configuration_and_installation Next: \ref vectors
*/
//*************************************************************************************************
//**Vectors****************************************************************************************
/*!\page vectors Vectors
//
// \tableofcontents
//
//
// \n \section vectors_general General Concepts
// <hr>
//
// The \b Blaze library currently offers five dense vector types (\ref vector_types_static_vector,
// \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, \ref vector_types_custom_vector,
// and \ref vector_types_uniform_vector) and two sparse vector types (\ref vector_types_compressed_vector
// and \ref vector_types_zero_vector). All vectors can be specified as either column vectors or row
// vectors:
\code
using blaze::DynamicVector;
using blaze::columnVector;
using blaze::rowVector;
// Setup of the 3-dimensional dense column vector
//
// ( 1 )
// ( 2 )
// ( 3 )
//
DynamicVector<int,columnVector> a{ 1, 2, 3 };
// Setup of the 3-dimensional dense row vector
//
// ( 4 5 6 )
//
DynamicVector<int,rowVector> b{ 4, 5, 6 };
\endcode
// Per default, all vectors in \b Blaze are column vectors:
\code
// Instantiation of a 3-dimensional column vector
blaze::DynamicVector<int> c( 3UL );
\endcode
// \n \section vectors_details Vector Details
// <hr>
//
// - \ref vector_types
// - \ref vector_operations
//
//
// \n \section vectors_examples Examples
// <hr>
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowVector;
using blaze::columnVector;
StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector
CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector
// ... Resizing and initialization
c = a + trans( b );
\endcode
// \n Previous: \ref getting_started Next: \ref vector_types
*/
//*************************************************************************************************
//**Vector Types***********************************************************************************
/*!\page vector_types Vector Types
//
// \tableofcontents
//
//
// \n \section vector_types_dense_vectors Dense Vectors
// <hr>
//
// \subsection vector_types_static_vector StaticVector
//
// The blaze::StaticVector class template is the representation of a fixed size vector with
// statically allocated elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/StaticVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the number of elements, the transpose flag, the alignment, the
// padding, and the group tag of the vector can be specified via the six template parameters:
\code
namespace blaze {
template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag >
class StaticVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. StaticVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the total number of vector elements. It is expected that StaticVector is
// only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c AF : specifies whether the first element of the vector is properly aligned with
// respect to the available instruction set (SSE, AVX, ...). Possible values are
// \c blaze::aligned and \c blaze::unaligned. The default value is
// \c blaze::defaultAlignmentFlag.
// - \c PF : specifies whether the vector should be padded to maximize the efficiency of
// vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded.
// The default value is \c blaze::defaultPaddingFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at
// compile time:
\code
// Definition of a 3-dimensional integral column vector
blaze::StaticVector<int,3UL> a;
// Definition of a 4-dimensional single precision column vector
blaze::StaticVector<float,4UL,blaze::columnVector> b;
// Definition of an unaligned, unpadded 6-dimensional double precision row vector
blaze::StaticVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c;
\endcode
// \subsubsection vector_types_static_vector_alignment Alignment
//
// In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticVector are possibly
// over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX,
// AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...)
// and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes
// for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic
// alignment:
\code
struct Int { int i; };
using VT1 = blaze::StaticVector<double,3UL>;
using VT2 = blaze::StaticVector<complex<float>,2UL>;
using VT3 = blaze::StaticVector<Int,5UL>;
alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT3 ); // Evaluates to 'alignof( Int )'
\endcode
// Note that an aligned blaze::StaticVector instance may be bigger than the sum of its data
// elements:
\code
sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX
sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX
sizeof( VT3 ); // Evaluates to 20; no special alignment requirements
\endcode
// Please note that for this reason an aligned blaze::StaticVector cannot be used in containers
// using dynamic memory such as \c std::vector without additionally providing an allocator that
// can provide over-aligned memory:
\code
using Type = blaze::StaticVector<double,3UL>;
using Allocator = blaze::AlignedAllocator<Type>;
std::vector<Type> v1; // Might be misaligned for AVX or AVX-512
std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512
\endcode
// \subsubsection vector_types_static_vector_padding Padding
//
// Adding padding elements to the end of a blaze::StaticVector can have a significant impact on
// the performance. For instance, assuming that AVX is available, then two padded 3-dimensional
// vectors of double precision values can be added via a single SIMD addition operation:
\code
using blaze::StaticVector;
using blaze::columnVector;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
StaticVector<double,3UL,columnVector,aligned,padded> a1, b1, c1;
StaticVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2;
// ... Initialization
c1 = a1 + b1; // AVX-based vector addition; maximum performance
c2 = a2 + b2; // Scalar vector addition; limited performance
sizeof( a1 ); // Evaluates to 32 for SSE and AVX, and 64 for AVX-512
sizeof( a2 ); // Evaluates to 24 for SSE, AVX, and AVX-512 (minimum size)
\endcode
// Due to padding, the first addition will run at maximum performance. On the flip side, the size
// of each vector instance is increased due to the padding elements. The total size of an instance
// depends on the number of elements and width of the available instruction set (16 bytes for
// SSE, 32 bytes for AVX, and 64 bytes for AVX-512).
//
// The second addition will be limited in performance since due to the number of elements some of
// the elements need to be handled in a scalar operation. However, the size of an \c unaligned,
// \c unpadded blaze::StaticVector instance is guaranteed to be the sum of its elements.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \subsection vector_types_dynamic_vector DynamicVector
//
// The blaze::DynamicVector class template is the representation of an arbitrary sized vector
// with dynamically allocated elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/DynamicVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the transpose flag, the type of the allocator, and the group tag of
// the vector can be specified via the four template parameters:
\code
namespace blaze {
template< typename Type, bool TF, typename Alloc, typename Tag >
class DynamicVector;
} // namespace blaze
\endcode
// - \c Type : specifies the type of the vector elements. DynamicVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c Alloc: specifies the type of allocator used to allocate dynamic memory. The default type
// of allocator is \c blaze::AlignedAllocator.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best
// choice for medium to large vectors. Its size can be modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::DynamicVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::DynamicVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::DynamicVector<double,blaze::rowVector> c;
\endcode
// \subsubsection vector_types_dynamic_vector_allocators Allocators
//
// Via the third template parameter it is possible to customize the memory allocation of a
// \c blaze::DynamicVector. The provided allocator is expected to represent an implementation of
// the allocator concept of the standard library (see for instance
// <a href="https://en.cppreference.com/w/cpp/container/vector">std::vector</a> and
// <a href="https://en.cppreference.com/w/cpp/memory/allocator">std::allocator</a>). In
// addition, the provided allocator is also required to provide properly (over-)aligned memory
// for fundamental and complex numbers. For instance, in case SSE vectorization is possible, the
// returned memory must be at least 16-byte aligned. In case AVX is active, the memory must be at
// least 32-byte aligned, and in case of AVX-512 the memory must be even 64-byte aligned.
//
//
// \n \subsection vector_types_hybrid_vector HybridVector
//
// The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and
// the blaze::DynamicVector class templates. It represents a fixed size vector with statically
// allocated elements, but still can be dynamically resized (within the bounds of the available
// memory). It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/HybridVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the maximum number of elements, the transpose flag, the alignment,
// the padding, and the group tag of the vector can be specified via the six template parameters:
\code
namespace blaze {
template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag >
class HybridVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. HybridVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the maximum number of vector elements. It is expected that HybridVector
// is only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c AF : specifies whether the first element of the vector is properly aligned with
// respect to the available instruction set (SSE, AVX, ...). Possible values are
// \c blaze::aligned and \c blaze::unaligned. The default value is
// \c blaze::defaultAlignmentFlag.
// - \c PF : specifies whether the vector should be padded to maximize the efficiency of
// vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded.
// The default value is \c blaze::defaultPaddingFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:
\code
// Definition of a 3-dimensional integral column vector with a maximum size of 6
blaze::HybridVector<int,6UL> a( 3UL );
// Definition of a 4-dimensional single precision column vector with a maximum size of 16
blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );
// Definition of an unaligned, unpadded double precision row vector with size 0 and a maximum size of 6
blaze::HybridVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c;
\endcode
// \subsubsection vector_types_hybrid_vector_alignment Alignment
//
// In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridVector are possibly
// over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX,
// AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...)
// and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes
// for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic
// alignment:
\code
struct Int { int i; };
using VT1 = blaze::HybridVector<double,3UL>;
using VT2 = blaze::HybridVector<complex<float>,2UL>;
using VT3 = blaze::HybridVector<Int,5UL>;
alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT3 ); // Evaluates to 'alignof( Int )'
\endcode
// Note that an aligned blaze::HybridVector instance may be bigger than an according unaligned
// blaze::HybridVector:
\code
sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX
sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX
sizeof( VT3 ); // Evaluates to 20; no special alignment requirements
\endcode
// Please note that for this reason an aligned blaze::HybridVector cannot be used in containers
// using dynamic memory such as \c std::vector without additionally providing an allocator that
// can provide over-aligned memory:
\code
using Type = blaze::HybridVector<double,3UL>;
using Allocator = blaze::AlignedAllocator<Type>;
std::vector<Type> v1; // Might be misaligned for AVX or AVX-512
std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512
\endcode
// \subsubsection vector_types_hybrid_vector_padding Padding
//
// Adding padding elements to the end of a blaze::HybridVector can have a significant impact on
// the performance. For instance, assuming that AVX is available, then two padded 3-dimensional
// vectors of double precision values can be added via a single SIMD addition operation:
\code
using blaze::HybridVector;
using blaze::columnVector;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
HybridVector<double,3UL,columnVector,aligned,padded> a1, b1, c1;
HybridVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2;
// ... Resizing and initialization
c1 = a1 + b1; // AVX-based vector addition; maximum performance
c2 = a2 + b2; // Scalar vector addition; limited performance
   sizeof( a1 );  // Evaluates to 48 for SSE, 64 for AVX, and 128 for AVX-512
sizeof( a2 ); // Evaluates to 32 for SSE, AVX, and AVX-512 (minimum size)
\endcode
// Due to padding, the first addition will run at maximum performance. On the flip side, the size
// of each vector instance is increased due to the padding elements. The total size of an instance
// depends on the number of elements and width of the available instruction set (16 bytes for
// SSE, 32 bytes for AVX, and 64 bytes for AVX-512).
//
// The second addition will be limited in performance since due to the number of elements some of
// the elements need to be handled in a scalar operation. However, the size of an \c unaligned,
// \c unpadded blaze::HybridVector instance is guaranteed to be the sum of its elements plus the
// necessary data members to store the current size.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \subsection vector_types_custom_vector CustomVector
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/CustomVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the properties of the given array of elements, the transpose flag,
// and the group tag of the vector can be specified via the following five template parameters:
\code
namespace blaze {
template< typename Type, AlignmentFlag AF, PaddingFlag PF, bool TF, typename Tag >
class CustomVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. blaze::CustomVector can be used with
// any possibly cv-qualified, non-reference, non-pointer element type.
// - \c AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::aligned
// or \c blaze::unaligned).
//  - \c PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::padded
// or \c blaze::unpadded).
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::CustomVector is the right choice if any external array needs to be represented as
// a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>;
std::vector<int> vec( 7UL );
UnalignedUnpadded a( &vec[0], 7UL );
// Definition of a managed custom column vector for unaligned but padded 'float' arrays
using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>;
std::unique_ptr<float[]> memory1( new float[16] );
UnalignedPadded b( memory1.get(), 9UL, 16UL );
// Definition of a managed custom row vector for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) );
AlignedUnpadded c( memory2.get(), 7UL );
// Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>;
std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) );
AlignedPadded d( memory3.get(), 5UL, 8UL );
\endcode
// In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several
// special characteristics. All of these result from the fact that a custom vector is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref vector_types_custom_vector_memory_management</b>
// -# <b>\ref vector_types_custom_vector_copy_operations</b>
// -# <b>\ref vector_types_custom_vector_alignment</b>
// -# <b>\ref vector_types_custom_vector_padding</b>
//
// \subsubsection vector_types_custom_vector_memory_management Memory Management
//
// The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// vector data structure. However, this flexibility comes with the price that the user of a custom
// vector is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom vectors:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3-dimensional custom vector with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom vector!
std::vector<int> vec( 3UL );
CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL );
// Definition of a custom vector with size 3 and capacity 16 with aligned, padded and
// externally managed integer array. Note that the std::unique_ptr must be guaranteed
// to outlive the custom vector!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
\endcode
// \subsubsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:
\code
using blaze::CustomVector;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomVector<int,unaligned,unpadded>;
std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10
CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector
a[1] = 20; // Also modifies the std::vector
CustomType b( a ); // Creating a copy of vector a
b[2] = 20; // Also affects vector a and the std::vector
\endcode
// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:
\code
std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4
CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector
a = c; // Copy assignment: Set all values of vector a and b to 4.
\endcode
// \subsubsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
   // Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );
CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
\endcode
// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \subsubsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single SIMD addition operation:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomVector<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );
// Creating padded custom vectors of size 3 and a capacity of 4
CustomType a( memory1.get(), 3UL, 4UL );
CustomType b( memory2.get(), 3UL, 4UL );
CustomType c( memory3.get(), 3UL, 4UL );
// ... Initialization
c = a + b; // AVX-based vector addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomVector<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );
// Creating unpadded custom vector of size 3
CustomType a( memory1.get(), 3UL );
CustomType b( memory2.get(), 3UL );
CustomType c( memory3.get(), 3UL );
// ... Initialization
c = a + b; // Scalar vector addition
\endcode
// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD vector
// width. In case of unaligned padded vectors the number of padding elements can be greater than or
// equal to the number of padding elements of an aligned padded custom vector. In case the padding
// is insufficient with respect to the available instruction set, a \c std::invalid_argument
// exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \subsection vector_types_uniform_vector UniformVector
//
// The blaze::UniformVector class template is the representation of an arbitrary sized uniform
// vector with elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/UniformVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the transpose flag, and the group tag of the vector can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool TF, typename Tag >
class UniformVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. UniformVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::UniformVector is the best choice for uniform vectors of any size. Its size can be
// modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::UniformVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::UniformVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::UniformVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_sparse_vectors Sparse Vectors
// <hr>
//
// \subsection vector_types_compressed_vector CompressedVector
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/CompressedVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the transpose flag, and the group tag of the vector can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool TF, typename Tag >
class CompressedVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::CompressedVector is the right choice for all kinds of sparse vectors:
\code
// Definition of a 3-dimensional integral column vector
blaze::CompressedVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements
blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL );
// Definition of a double precision row vector with size 0
blaze::CompressedVector<double,blaze::rowVector> c;
\endcode
// \n \subsection vector_types_zero_vector ZeroVector
//
// The blaze::ZeroVector class template is the representation of an immutable, arbitrary sized
// zero vector with elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/ZeroVector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the transpose flag, and the group tag of the vector can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool TF, typename Tag >
class ZeroVector;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the vector elements. ZeroVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag.
// - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::ZeroVector is the perfect choice to represent a zero vector:
\code
// Definition of a 3-dimensional integral zero column vector
blaze::ZeroVector<int> a( 3UL );
// Definition of a 6-dimensional single precision zero column vector
blaze::ZeroVector<float,blaze::columnVector> b( 6UL );
// Definition of a double precision row vector with size 0
blaze::ZeroVector<double,blaze::rowVector> c;
\endcode
// \n Previous: \ref vectors Next: \ref vector_operations
*/
//*************************************************************************************************
//**Vector Operations******************************************************************************
/*!\page vector_operations Vector Operations
//
// \tableofcontents
//
//
// \n \section vector_operations_constructors Constructors
// <hr>
//
// Instantiating and setting up a vector is very easy and intuitive. However, there are a few
// rules to take care of:
// - In case the last template parameter (the transpose flag) is omitted, the vector is per
// default a column vector.
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection vector_operations_default_construction Default Construction
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
// All vectors can be default constructed. Whereas the size
// of StaticVectors is fixed via the second template parameter,
// the initial size of a default constructed DynamicVector or
// CompressedVector is 0.
StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector.
// All elements are initialized to 0.
StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector.
// Again, all elements are initialized to 0L.
DynamicVector<float> v3; // Instantiation of a dynamic single precision column
// vector of size 0.
DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row
// vector of size 0.
CompressedVector<int> v5; // Instantiation of a compressed integer column
// vector of size 0.
CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row
// vector of size 0.
\endcode
// \n \subsection vector_operations_size_construction Construction with Specific Size
//
// The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that
// allows to immediately give the vector the required size. Whereas both dense vectors (i.e.
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector
// elements, \c CompressedVector merely acquires the size but remains empty.
\code
DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector
// of size 9. The elements are NOT initialized!
HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single
// precision complex values. The elements are
// default constructed.
CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with
// size 10. Initially, the vector provides no
// capacity for non-zero elements.
\endcode
// \n \subsection vector_operations_initialization_constructors Initialization Constructors
//
// All dense vector classes offer a constructor that allows for a direct, homogeneous initialization
// of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements
// can be specified
\code
StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector.
// All elements are initialized to 2.
DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision
// column vector of size 3. All elements are
// set to 7.0F.
CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column
// vector of size 15, which provides enough
// space for at least 3 non-zero elements.
\endcode
// \n \subsection vector_operations_array_construction Array Construction
//
// Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic
// or static array, or with a \c std::array. If the vector is initialized from a dynamic array, the
// constructor expects the actual size of the array as first argument, the array as second argument.
// In case of a static array or \c std::array, the fixed size of the array is used:
\code
const unique_ptr<double[]> array1( new double[2] );
// ... Initialization of the dynamic array
blaze::StaticVector<double,2UL> v13( 2UL, array1.get() );
const int array2[4] = { 4, -5, -6, 7 };
blaze::StaticVector<int,4UL> v14( array2 );
const std::array<float,3UL> array3{ 1.1F, 2.2F, 3.3F };
blaze::StaticVector<float,3UL> v15( array3 );
\endcode
// \n \subsection vector_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse vector classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicVector<float> v16{ 1.0F, 2.0F, 3.0F, 4.0F };
blaze::CompressedVector<int> v17{ 0, 2, 0, 0, 5, 0, 7, 0 };
\endcode
// Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector,
// \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are sized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values
// are initialized as default and in case the size of the initializer list exceeds the size
// of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only
// the non-zero elements are used to initialize the vector.
//
// \n \subsection vector_operations_copy_construction Copy Construction
//
// All dense and sparse vectors can be created as the copy of any other dense or sparse vector
// with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector).
\code
StaticVector<int,9UL,columnVector> v18( v7 ); // Instantiation of the dense column vector v18
// as copy of the dense column vector v7.
DynamicVector<int,rowVector> v19( v9 ); // Instantiation of the dense row vector v19 as
// copy of the sparse row vector v9.
CompressedVector<int,columnVector> v20( v1 ); // Instantiation of the sparse column vector v20
// as copy of the dense column vector v1.
CompressedVector<float,rowVector> v21( v12 ); // Instantiation of the sparse row vector v21 as
// copy of the row vector v12.
\endcode
// Note that it is not possible to create a \c StaticVector as a copy of a vector with a different
// size:
\code
StaticVector<int,5UL,columnVector> v22( v7 ); // Runtime error: Size does not match!
StaticVector<int,4UL,rowVector> v23( v10 ); // Compile time error: Size does not match!
\endcode
// \n \section vector_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse vectors:
// \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment,
// \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment.
//
// \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment
//
// Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used:
\code
blaze::StaticVector<int,3UL> v1;
blaze::DynamicVector<double> v2;
// Setting all integer elements of the StaticVector to 2
v1 = 2;
// Setting all double precision elements of the DynamicVector to 5.0
v2 = 5.0;
\endcode
// \n \subsection vector_operations_array_assignment Array Assignment
//
// Dense vectors can also be assigned a static array or \c std::array:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
const float array1[2] = { 1.0F, 2.0F };
const std::array<double,5UL> array2{ 2.1, 4.0, -1.7, 8.6, -7.2 };
v1 = array1;
v2 = array2;
\endcode
// \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// vector:
\code
blaze::DynamicVector<float> v1;
blaze::CompressedVector<double,rowVector> v2;
v1 = { 1.0F, 2.0F };
v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 };
\endcode
// Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector,
// \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are resized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values
// are reset to their default value and in case the size of the initializer list exceeds the size
// of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only
// the non-zero elements are considered.
//
// \n \subsection vector_operations_copy_assignment Copy Assignment
//
// For all vector types it is generally possible to assign another vector with the same transpose
// flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the
// assigned vector is required to have the same size as the \c StaticVector since the size of a
// \c StaticVector cannot be adapted!
\code
blaze::StaticVector<int,3UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 3UL );
blaze::DynamicVector<float,columnVector> v3( 5UL );
blaze::CompressedVector<int,columnVector> v4( 3UL );
blaze::CompressedVector<float,rowVector> v5( 3UL );
// ... Initialization of the vectors
v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector
v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector
v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector
v1 = v5; // Compilation error: Cannot assign a row vector to a column vector
\endcode
// \n \subsection vector_operations_compound_assignment Compound Assignment
//
// Next to plain assignment, it is also possible to use addition assignment, subtraction
// assignment, and multiplication assignment. Note however, that in contrast to plain assignment
// the size and the transpose flag of the vectors have to be equal in order to be able to perform a
// compound assignment.
\code
blaze::StaticVector<int,5UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 5UL );
blaze::CompressedVector<float,columnVector> v3( 7UL );
blaze::DynamicVector<float,rowVector> v4( 7UL );
blaze::CompressedVector<float,rowVector> v5( 7UL );
// ... Initialization of the vectors
v1 += v2; // OK: Addition assignment between two column vectors of the same size
v1 += v3; // Runtime error: No compound assignment between vectors of different size
v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag
v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size
\endcode
// \n \section vector_operations_element_access Element Access
// <hr>
//
// \subsection vector_operations_subscript_operator_1 Subscript Operator
//
// The easiest and most intuitive way to access a dense or sparse vector is via the subscript
// operator. The indices to access a vector are zero-based:
\code
blaze::DynamicVector<int> v1( 5UL );
v1[0] = 1;
v1[1] = 3;
// ...
blaze::CompressedVector<float> v2( 5UL );
v2[2] = 7.3F;
v2[4] = -1.4F;
\endcode
// Whereas using the subscript operator on a dense vector only accesses the already existing
// element, accessing an element of a sparse vector via the subscript operator potentially
// inserts the element into the vector and may therefore be more expensive. Consider the
// following example:
\code
blaze::CompressedVector<int> v1( 10UL );
for( size_t i=0UL; i<v1.size(); ++i ) {
... = v1[i];
}
\endcode
// Although the compressed vector is only used for read access within the for loop, using the
// subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the
// preferred way to traverse the non-zero elements of a sparse vector is to use iterators.
//
// \n \subsection vector_operations_iterators Iterators
//
// An alternate way to traverse the elements contained in a dense or sparse vector is by means
// of iterators. For that purpose, all vectors provide the \c begin(), \c cbegin(), \c end(),
// and \c cend() member functions. In case of non-const vectors, \c begin() and \c end() return
// an \c Iterator, which allows a manipulation of the (non-zero) value. In case of a constant
// vector or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned. Iterators
// on dense vectors traverse all elements of the vector, including the zero elements. Iterators
// on sparse vectors only traverse the non-zero elements.
//
// The following two examples demonstrate how to traverse the elements of a dense and sparse
// vector, respectively:
\code
using blaze::DynamicVector;
DynamicVector<int> v1( 10UL );
// Traversing all elements contained in the vector by Iterator
for( DynamicVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) {
*it = ...; // OK: Write access to the value of the element.
... = *it; // OK: Read access to the value of the element.
}
// Traversing all elements contained in the vector by ConstIterator
for( DynamicVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the value of the element.
}
// Traversing the vector elements by means of a range-based for loop
for( int& i : v1 ) {
i = ...; // OK: Write access to the value of the element.
... = i; // OK: Read access to the value of the element.
}
\endcode
\code
using blaze::CompressedVector;
CompressedVector<int> v2( 10UL );
// ... Initialization of the vector
// Traversing the non-zero elements contained in the vector by Iterator
for( CompressedVector<int>::Iterator it=v2.begin(); it!=v2.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
// Traversing the non-zero elements contained in the vector by ConstIterator
for( CompressedVector<int>::ConstIterator it=v2.cbegin(); it!=v2.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( CompressedVector<int>::Iterator it=begin( v2 ); it!=end( v2 ); ++it ) {
// ...
}
for( CompressedVector<int>::ConstIterator it=cbegin( v2 ); it!=cend( v2 ); ++it ) {
// ...
}
\endcode
// \n \subsection vector_operations_data .data() / data()
//
// Sometimes it is necessary to acquire a pointer to the first element of the underlying array
// of a dense vector. For that purpose the \c data() member function or the free \c data() function
// can be used:
\code
// Instantiating a dynamic vector with 10 elements
blaze::DynamicVector<int> v( 10UL );
v.data(); // Returns a pointer to the first element of the dynamic vector
data( v ); // Same effect as the member function
\endcode
// \n \section vector_operations_element_insertion Element Insertion
// <hr>
//
// In contrast to dense vectors, that store all elements independent of their value and that
// offer direct access to all elements, sparse vectors only store the non-zero elements contained
// in the vector. Therefore it is necessary to explicitly add elements to the vector.
//
// \n \subsection vector_operations_subscript_operator_2 Subscript Operator
//
// The first option to add elements to a sparse vector is the subscript operator:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 3UL );
v1[1] = 2;
\endcode
// In case the element at the given index is not yet contained in the vector, it is automatically
// inserted. Otherwise the old value is replaced by the new value 2. The operator returns a
// reference to the sparse vector element.
//
// \n \subsection vector_operations_set .set()
//
// An alternative to the subscript operator is the \c set() function: In case the element is not
// yet contained in the vector the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at index 3
v1.set( 3, 1 );
\endcode
// \n \subsection vector_operations_insert .insert()
//
// The insertion of elements can be better controlled via the \c insert() function. In contrast to
// the subscript operator and the \c set() function it emits an exception in case the element is
// already contained in the vector. In order to check for this case, the \c find() function can be
// used:
\code
// In case the element at index 4 is not yet contained in the matrix it is inserted
// with a value of 6.
if( v1.find( 4 ) == v1.end() )
v1.insert( 4, 6 );
\endcode
// \n \subsection vector_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not suited
// for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill
// a sparse vector is the \c append() function. It requires the sparse vector to provide enough
// capacity to insert a new element. Additionally, the index of the new element must be larger
// than the index of the previous element. Violating these conditions results in undefined
// behavior!
\code
v1.reserve( 10 ); // Reserving space for 10 non-zero elements
v1.append( 5, -2 ); // Appending the element -2 at index 5
v1.append( 6, 4 ); // Appending the element 4 at index 6
// ...
\endcode
// \n \section vector_operations_element_removal Element Removal
// <hr>
//
// \subsection vector_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse vector. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedVector;
CompressedVector<int> v( 42 );
// ... Initialization of the vector
// Erasing the element at index 21
v.erase( 21 );
// Erasing a single element via iterator
v.erase( v.find( 4 ) );
// Erasing all non-zero elements in the range [7..24]
v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
v.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] with a value larger than 5
v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } );
\endcode
// \n \section vector_operations_element_lookup Element Lookup
// <hr>
//
// A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever
// accessing a vector element at a specific index a lookup operation is required. Whereas the
// subscript operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection vector_operations_find .find() / find()
//
// The \c find() function can be used to check whether a specific element is contained in a sparse
// vector. It specifically searches for the element at the given index. In case the element is
// found, the function returns an iterator to the element. Otherwise an iterator just past the
// last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that
// the returned iterator is subject to invalidation due to inserting operations via the subscript
// operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the element at index 7. In case the element is not
// contained in the vector, the end() iterator is returned.
CompressedVector<int>::Iterator pos( a.find( 7 ) );
if( pos != a.end() ) {
// ...
}
\endcode
// Alternatively, the free function \c find() can be used to find a specific element in a sparse
// vector:
\code
find( a, 7 ); // Searching the element at index 7; same effect as the member function
\endcode
// \n \subsection vector_operations_lowerbound .lowerBound() / lowerBound()
//
// The \c lowerBound() function returns an iterator to the first element with an index not less
// than the given index. In combination with the \c upperBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// Alternatively, the free function \c lowerBound() can be used:
\code
lowerBound( a, 17 ); // Searching the lower bound of index 17; same effect as the member function
\endcode
// \n \subsection vector_operations_upperbound .upperBound() / upperBound()
//
// The \c upperBound() function returns an iterator to the first element with an index greater than
// the given index. In combination with the \c lowerBound() function this function can be used to
// create a pair of iterators specifying a range of indices. Note that the returned iterator is
// subject to invalidation due to inserting operations via the subscript operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// Alternatively, the free function \c upperBound() can be used:
\code
upperBound( a, 28 ); // Searching the upper bound of index 28; same effect as the member function
\endcode
// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size() / size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:
\code
// Instantiating a dynamic vector with size 10
blaze::DynamicVector<int> v1( 10UL );
v1.size(); // Returns 10
// Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
blaze::CompressedVector<double> v2( 12UL, 3UL );
v2.size(); // Returns 12
\endcode
// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:
\code
size( v1 ); // Returns 10, i.e. has the same effect as the member function
size( v2 ); // Returns 12, i.e. has the same effect as the member function
blaze::DynamicMatrix<int> A( 15UL, 12UL );
size( A * v2 ); // Returns 15, i.e. the size of the resulting vector
\endcode
// \n \subsection vector_operations_capacity .capacity() / capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater than or equal to
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.
\code
v1.capacity(); // Returns at least 10
\endcode
// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:
\code
capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function
\endcode
// Note, however, that it is not possible to query the capacity of a vector expression:
\code
capacity( A * v1 ); // Compilation error!
\endcode
// \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.
\code
v1.nonZeros(); // Returns the number of non-zero elements in the dense vector
v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector
\endcode
// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:
\code
nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector
nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression. However, the result is not the exact number of non-zero elements, but
// may be a rough estimation:
\code
nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression
\endcode
// \n \subsection vector_operations_isempty isEmpty()
//
// The \c isEmpty() function returns whether the total number of elements of the vector is zero:
\code
blaze::DynamicVector<int> a; // Create an empty vector
isEmpty( a ); // Returns true
a.resize( 10 ); // Resize to 10 elements
isEmpty( a ); // Returns false
\endcode
// \n \subsection vector_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse vector for not-a-number
// elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false.
//
//
// \n \subsection vector_operations_isinf isinf()
//
// The \c isinf() function checks the given dense or sparse vector for infinite (\c inf) elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isinf( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isinf( a ) ) { ... }
\endcode
// If at least one element of the vector is infinite, the function returns \c true, otherwise it
// returns \c false.
//
//
// \n \subsection vector_operations_isfinite isfinite()
//
// The \c isfinite() function checks if all elements of the given dense or sparse vector are
// finite elements (i.e. normal, subnormal or zero elements, but not infinite or NaN):
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isfinite( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isfinite( a ) ) { ... }
\endcode
// If all elements of the vector are finite, the function returns \c true, otherwise it returns
// \c false.
//
//
// \n \subsection vector_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:
\code
blaze::HybridVector<int,20UL> a;
// ... Resizing and initialization
if( isDefault( a ) ) { ... }
\endcode
// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, element selections, rows, and columns) is in default state if all its elements are
// in default state. For instance, in case the vector is instantiated for a built-in integral or
// floating point data type, the function returns \c true in case all vector elements are 0 and
// \c false in case any vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform()
//
// In order to check if all vector elements are identical, the \c isUniform() function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isUniform( a ) ) { ... }
\endcode
// Note that in case of sparse vectors the zero elements are also taken into account!
//
//
// \n \subsection vector_operations_isZero isZero()
//
// In order to check if all vector elements are zero, the \c isZero() function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isZero( a ) ) { ... }
\endcode
// \n \subsection vector_operations_length length() / sqrLength()
//
// In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length()
// and \c sqrLength() function can be used:
\code
blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F };
const float len = length ( v ); // Computes the current length of the vector
const float sqrlen = sqrLength( v ); // Computes the square length of the vector
\endcode
// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_trans trans()
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However,
// vectors can be transposed via the \c trans() function:
\code
blaze::DynamicVector<int,columnVector> v1( 4UL );
blaze::CompressedVector<int,rowVector> v2( 4UL );
v1 = v2; // Compilation error: Cannot assign a row vector to a column vector
v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it
// to the column vector v1
v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2
v1 += trans( v2 ); // OK: Addition assignment of two column vectors
\endcode
// \n \subsection vector_operations_ctrans ctrans()
//
// It is also possible to compute the conjugate transpose of a vector. This operation is available
// via the \c ctrans() function:
\code
blaze::CompressedVector< complex<float>, rowVector > v1( 4UL );
blaze::DynamicVector< complex<float>, columnVector > v2( 4UL );
v1 = ctrans( v2 ); // Compute the conjugate transpose vector
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector
v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector
\endcode
// \n \subsection vector_operations_reverse reverse()
//
// Via the \c reverse() function it is possible to reverse the elements of a dense or sparse
// vector. The following example demonstrates this by means of a dense vector:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4, 5 };
blaze::DynamicVector<int> b;
b = reverse( a ); // Results in ( 5 4 3 2 1 )
\endcode
// \n \subsection vector_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given vector expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a dense and a sparse vector:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
auto c = evaluate( a * b );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary vector is created and no copy operation is performed. Instead, the result
// is directly written to the target vector due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
CompressedVector<double> d( a * b ); // No temporary & no copy operation
DynamicVector<double> e( a * b ); // Temporary & copy operation
d = evaluate( a * b ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicVector<double> a, b, c, d;
d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector
d = a + eval( b * c ); // No creation of a temporary vector
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
// \n \subsection vector_operations_noalias noalias()
//
// The \b Blaze library is able to reliably detect aliasing during the assignment of vectors.
// In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate
// temporary of the appropriate type to break the aliasing. For instance, in the following
// example \b Blaze performs an alias detection in both assignments, but only, in the second
// assignment it detects a problematic aliasing and uses an intermediate temporary in order
// to be able to compute the correct result:
\code
blaze::DynamicVector<double> x, y;
blaze::DynamicMatrix<double> A;
x = x + y; // No problematic aliasing of x, no intermediate temporary is required.
x = A * x; // Problematic aliasing of x; intermediate temporary required!
\endcode
// The detection of aliasing effects, however, takes a small runtime effort. In order to disable
// the aliasing detection, the \c noalias() function can be used:
\code
blaze::DynamicVector<double> x, y;
blaze::DynamicMatrix<double> A;
x = noalias( x + y ); // No alias detection performed, no intermediate temporary.
x = noalias( A * x ); // No alias detection performed, no intermediate temporary.
// Note that the final result will be incorrect!
\endcode
// \warning The \c noalias() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Using \c noalias() in a situation
// where an aliasing effect occurs leads to undefined behavior (such as violated invariants
// or wrong computation results)!
//
// \n \subsection vector_operations_nosimd nosimd()
//
// By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order
// to achieve maximum performance. However, via the \c nosimd() operation it is possible to disable
// the SIMD evaluation of any operation:
\code
blaze::DynamicVector<double> x, y;
blaze::DynamicMatrix<double> A;
x = nosimd( x + y ); // Disables SIMD for the vector/vector addition
x = nosimd( A * x ); // Disables SIMD for the matrix/vector multiplication
\endcode
// Please note that the main purpose of the \c nosimd() operation is to enable an easy performance
// comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation
// will likely result in significantly reduced performance!
//
// \n \subsection vector_operations_fix fix()
//
// By default, resizable vectors such as \c DynamicVector, \c HybridVector, and \c CompressedVector
// can adapt their size during an assignment:
\code
blaze::DynamicVector<int> a{ 1, 2 }; // Setup of a vector with 2 elements
blaze::DynamicVector<int> b{ 1, 2, 3, 4 }; // Setup of a vector with 4 elements
a = b; // Resizes vector 'a' to 4 elements
\endcode
// Via the \c fix() operation it is possible to fix the size of a resizable vector. If a vector
// with a different size is assigned, instead of resizing the vector the operation fails by
// throwing a \c std::invalid_argument exception:
\code
blaze::DynamicVector<int> a{ 1, 2 }; // Setup of a vector with 2 elements
blaze::DynamicVector<int> b{ 1, 2, 3, 4 }; // Setup of a vector with 4 elements
fix( a ) = b; // Throws an exception: Vector cannot be resized!
\endcode
// \n \section vector_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection vector_operations_resize_reserve .resize() / .reserve()
//
// The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector
// cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as
// \c CompressedVectors can be changed via the \c resize() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<int,columnVector> v1;
CompressedVector<int,rowVector> v2( 4 );
v2[1] = -2;
v2[3] = 11;
// Adapting the size of the dynamic and compressed vectors. The (optional) second parameter
// specifies whether the existing elements should be preserved. Per default, the existing
// elements are preserved.
v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain
// uninitialized, elements of class type are default constructed.
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the
// new elements are NOT initialized!
v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved.
v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost.
\endcode
// Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors)
// on the vector:
\code
blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10
auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6]
v1.resize( 6UL ); // Resizing the vector invalidates the view
\endcode
// When the internal capacity of a vector is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicVector<int> v1;
v1.reserve( 100 );
v1.size(); // Returns 0
v1.capacity(); // Returns at least 100
\endcode
// Note that the size of the vector remains unchanged, but only the internal capacity is set
// according to the specified value!
//
// \n \subsection vector_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of vectors with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers
v1.resize( 10UL ); // Resize to 10, but the capacity is preserved
v1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c size(). Please
// also note that in case a reallocation occurs, all iterators (including \c end() iterators), all
// pointers and references to elements of the vector are invalidated.
//
// \subsection vector_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a vector, the \c reset() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with 2.0F.
blaze::DynamicVector<float> v1( 3UL, 2.0F );
// Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged.
reset( v1 ); // Resetting all elements
v1.size(); // Returns 3: size and capacity remain unchanged
\endcode
// In order to return a vector to its default state (i.e. the state of a default constructed
// vector), the \c clear() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with -1.0F.
blaze::DynamicVector<float> v1( 5, -1.0F );
// Resetting the entire vector.
clear( v1 ); // Resetting the entire vector
v1.size(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// Note that resetting or clearing both dense and sparse vectors does not change the capacity
// of the vectors.
//
//
// \n \subsection vector_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two vectors of
// the same type:
\code
blaze::DynamicVector<int,columnVector> v1( 10UL );
blaze::DynamicVector<int,columnVector> v2( 20UL );
swap( v1, v2 ); // Swapping the contents of v1 and v2
\endcode
// \n \section vector_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection vector_operations_normalize normalize()
//
// The \c normalize() function can be used to scale any non-zero vector to a length of 1. In
// case the vector does not contain a single non-zero element (i.e. is a zero vector), the
// \c normalize() function returns a zero vector.
\code
blaze::DynamicVector<float,columnVector> v1( 10UL );
blaze::CompressedVector<double,columnVector> v2( 12UL );
v1 = normalize( v1 ); // Normalizing the dense vector v1
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
v1 = normalize( v2 ); // Assigning v1 the normalized vector v2
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
\endcode
// Note that the \c normalize() function only works for floating point vectors. The attempt to
// use it for an integral vector results in a compile time error.
//
//
// \n \subsection vector_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single vector, multiple vectors, and
// a vector and a scalar.
//
// <b>Single Vector</b>
//
// If passed a single vector, the functions return the smallest and largest element of the given
// dense vector or the smallest and largest non-zero element of the given sparse vector,
// respectively:
\code
blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 };
min( a ); // Returns -5
max( a ); // Returns 7
\endcode
\code
blaze::CompressedVector<int> b{ 1, 0, 3, 0 };
min( b ); // Returns 1
max( b ); // Returns 3
\endcode
// For more information on the unary \c min() and \c max() reduction operations see the
// \ref vector_operations_reduction_operations section.
//
// <b>Multiple Vectors</b>
//
// If passed two or more dense vectors, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given vectors, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 };
blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 };
min( a, c ); // Results in the vector ( -5, 1, -7, -4 )
max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 )
\endcode
// Please note that sparse vectors can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a vector expression:
\code
min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector
max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector
min( a + c, c - d ); // Results in ( -10 -2 -7 0 )
max( a - c, c + d ); // Results in ( 0 4 14 6 )
\endcode
// <b>Vector and Scalar</b>
//
// If passed a dense vector and a scalar, the \c min() and \c max() functions compute the
// componentwise minimum or maximum between the given vector and a uniform vector represented by
// the scalar value:
\code
min( a, 0 ); // Results in ( -5, 0, 0, -4 )
min( 0, a ); // Results in ( -5, 0, 0, -4 )
max( a, 0 ); // Results in ( 0, 2, 7, 0 )
max( 0, a ); // Results in ( 0, 2, 7, 0 )
\endcode
// \n \subsection vector_operators_softmax softmax()
//
// The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called
// the normalized exponential function, of a given dense vector can be computed via \c softmax().
// The resulting dense vector consists of real values in the range (0..1], which add up to 1.
\code
blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 };
blaze::StaticVector<double,7UL,rowVector> y;
// Evaluating the softmax function
y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 )
double s = sum( y ); // Results in 1
\endcode
// \n \subsection vector_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 };
blaze::StaticVector<int,3UL,rowVector> b( abs( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
1 \\
2 \\
3 \\
\end{array}\right)\f$
// \n \subsection vector_operators_sign sign()
//
// The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For
// each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i]
// is zero, and -1 if \a a[i] is less than zero. For instance, the following use of the \c sign()
// function
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 };
blaze::StaticVector<int,3UL,rowVector> b( sign( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
-1 \\
1 \\
0 \\
\end{array}\right)\f$
// \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a vector, respectively:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = floor( a ); // Rounding down each element of the vector
b = ceil ( a ); // Rounding up each element of the vector
b = trunc( a ); // Truncating each element of the vector
b = round( a ); // Rounding each element of the vector
\endcode
// \n \subsection vector_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse vector to compute the complex
// conjugate of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Computing the vector of complex conjugates
// ( (-2, 1) )
// ( ( 1,-1) )
StaticVector<cplx,2UL> b;
b = conj( a );
\endcode
// Additionally, vectors can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicVector<cplx> c( 5UL );
conjugate( c ); // In-place conjugate operation.
c = conj( c ); // Same as above
\endcode
// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the real part of each vector element
// ( -2 )
// ( 1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode
// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the imaginary part of each vector element
// ( -1 )
// ( 1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode
// \n \subsection vector_operators_arg arg()
//
// The \c arg() function can be used on a dense or sparse vector to compute the phase angle for
// each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Compute the phase angle of each vector element
// ( -2.67795 )
// ( 0.785398 )
StaticVector<double,2UL> b;
b = arg( a );
\endcode
// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// vector can be computed:
\code
blaze::DynamicVector<double> a, b, c;
b = sqrt( a ); // Computes the square root of each element
c = invsqrt( a ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:
\code
blaze::HybridVector<double,3UL> a, b, c;
b = cbrt( a ); // Computes the cubic root of each element
c = invcbrt( a ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense vectors:
\code
blaze::StaticVector<double,3UL> a, b, c;
   c = hypot( a, b );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:
\code
   blaze::DynamicVector<double> a, b;
b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a vector.
// If passed a vector and a numeric exponent, the function computes the exponential value of each
// element of the vector using the same exponent. If passed a second vector, the function computes
// the componentwise exponential value:
\code
blaze::StaticVector<double,3UL> a, b, c;
c = pow( a, 1.2 ); // Computes the exponential value of each element
c = pow( a, b ); // Computes the componentwise exponential value
\endcode
// \n \subsection vector_operations_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// vector, respectively:
\code
blaze::DynamicVector<double> a, b;
b = exp( a ); // Computes the base e exponential of each element
b = exp2( a ); // Computes the base 2 exponential of each element
b = exp10( a ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_log log() / log2() / log10() / log1p() / lgamma()
//
// The \c log(), \c log2(), \c log10(), \c log1p() and \c lgamma() functions can be used to
// compute the natural, binary and common logarithm of each element of a vector:
\code
blaze::StaticVector<double,3UL> a, b;
b = log( a ); // Computes the natural logarithm of each element
b = log2( a ); // Computes the binary logarithm of each element
b = log10( a ); // Computes the common logarithm of each element
b = log1p( a ); // Computes the natural logarithm of x+1 of each element
b = lgamma( a ); // Computes the natural logarithm of the absolute value of the gamma function
\endcode
// \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sin( a ); // Computes the sine of each element of the vector
b = cos( a ); // Computes the cosine of each element of the vector
b = tan( a ); // Computes the tangent of each element of the vector
b = asin( a ); // Computes the inverse sine of each element of the vector
b = acos( a ); // Computes the inverse cosine of each element of the vector
b = atan( a ); // Computes the inverse tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sinh( a ); // Computes the hyperbolic sine of each element of the vector
b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector
b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector
b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector
b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector
b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense vectors:
\code
blaze::DynamicVector<double> a, b, c;
c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection vector_operations_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a vector:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = erf( a ); // Computes the error function of each element
b = erfc( a ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_map map() / forEach()
//
// Via the \c map() functions it is possible to execute componentwise custom operations on vectors.
// The unary \c map() function can be used to apply a custom operation on each element of a dense
// or sparse vector. For instance, the following example demonstrates a custom square root
// computation via a lambda:
\code
blaze::DynamicVector<double> a, b;
b = map( a, []( double d ) { return std::sqrt( d ); } );
\endcode
// The N-ary \c map() functions can be used to apply an operation componentwise to the elements
// of N dense vectors (where \f$ N <= 6 \f$). The following example demonstrates the merging of
// two column vectors of double precision values into a vector of double precision complex numbers:
\code
blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 };
blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 };
blaze::DynamicVector< complex<double> > cplx;
// Creating the vector
// ( ( 2.1, 0.3) )
// ( (-4.2, 1.4) )
// ( ( 1.0, 2.9) )
// ( ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Applying the map() function to a column vector and a row vector results in the outer map of
// the two vectors. The following example demonstrates the outer sum of a column vector and a
// row vector:
\code
blaze::DynamicVector<int,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
// Results in the matrix
//
// ( 1 5 0 6 )
// A = ( 4 8 3 9 )
// ( -2 2 -3 3 )
//
blaze::StaticMatrix<int,3UL,4UL> M1 = map( v1, v2, []( int a, int b ){ return a + b; } );
\endcode
// Although the computation in the two previous examples can be parallelized it is not vectorized
// and thus cannot perform at peak performance. However, it is also possible to create vectorized
// custom operations. See \ref custom_operations for a detailed overview of the possibilities of
// custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used, but the function might be deprecated in future releases of \b Blaze.
//
//
// \n \subsection vector_operations_select select()
//
// The \c select() function performs a componentwise, conditional selection of elements. Given
// the three dense vectors \c cond, \c a, and \c b, in case an element in the \c cond vector
// evaluates to \c true, the according element of \c a is selected, in case the \c cond element
// evaluates to \c false, the according element of \c b is selected. The following example
// demonstrates the use of the \c select() function:
\code
   blaze::DynamicVector<bool> cond{ true, false, true, false };
blaze::DynamicVector<int> a{ 1, -1, 1, -1 };
blaze::DynamicVector<int> b{ -2, 2, -2, 2 };
blaze::DynamicVector<int> c;
// ... Resizing and initialization
c = select( cond, a, b ); // Results in ( 1, 2, 1, 2 )
\endcode
// \n \section vector_operations_reduction_operations Reduction Operations
// <hr>
//
// \subsection vector_operations_reduction_operations_reduce reduce()
//
// The \c reduce() function performs a total reduction of the elements of the given dense vector
// or the non-zero elements of the given sparse vector. The following examples demonstrate the
// total reduction of a dense and sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double totalsum1 = reduce( a, blaze::Add() );
const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
const double totalmin1 = reduce( a, blaze::Min() );
const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } );
\endcode
// As demonstrated in the examples it is possible to pass any binary callable as custom reduction
// operation. However, for instance in the case of lambdas the vectorization of the reduction
// operation is compiler dependent and might not perform at peak performance. However, it is also
// possible to create vectorized custom operations. See \ref custom_operations for a detailed
// overview of the possibilities of custom operations.
//
// Please note that the evaluation order of the \c reduce() function is unspecified. Thus the
// behavior is non-deterministic if the given reduction operation is not associative or not
// commutative. Also, the operation is undefined if the given reduction operation modifies the
// values.
//
// \n \subsection vector_operations_reduction_operations_sum sum()
//
// The \c sum() function reduces the elements of the given dense vector or the non-zero elements
// of the given sparse vector by means of addition:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4 };
const int totalsum = sum( a ); // Results in 10
\endcode
\code
blaze::CompressedVector<int> a{ 1, 2, 3, 4 };
const int totalsum = sum( a ); // Results in 10
\endcode
// Please note that the evaluation order of the \c sum() function is unspecified.
//
// \n \subsection vector_operations_reduction_operations_prod prod()
//
// The \c prod() function reduces the elements of the given dense vector or the non-zero elements
// of the given sparse vector by means of multiplication:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4 };
const int totalprod = prod( a ); // Results in 24
\endcode
\code
blaze::CompressedVector<int> a{ 1, 2, 3, 4 };
const int totalprod = prod( a ); // Results in 24
\endcode
// \n \subsection vector_operations_reduction_operations_min min()
//
// The unary \c min() function returns the smallest element of the given dense vector or the
// smallest non-zero element of the given sparse vector. It can only be used for element types
// that support the smaller-than relationship. In case the given vector currently has a size
// of 0, the returned value is the default value (e.g. 0 in case of fundamental data types).
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const int totalmin = min( a ); // Results in -2
\endcode
\code
blaze::CompressedVector<int> a{ 1, 0, 3, 0 };
const int totalmin = min( a ); // Results in 1
\endcode
// \note In case the sparse vector is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed vector has only 2 non-zero elements.
// However, the minimum of the vector is 1.
//
// \n \subsection vector_operations_reduction_operations_max max()
//
// The unary \c max() function returns the largest element of the given dense vector or the
// largest non-zero element of the given sparse vector. It can only be used for element types
// that support the smaller-than relationship. In case the given vector currently has a size
// of 0, the returned value is the default value (e.g. 0 in case of fundamental data types).
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const int totalmax = max( a ); // Results in 3
\endcode
\code
blaze::CompressedVector<int> a{ -1, 0, -3, 0 };
   const int totalmax = max( a );  // Results in -1
\endcode
// \note In case the sparse vector is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed vector has only 2 non-zero elements.
// However, the maximum of the vector is -1.
//
// \n \subsection vector_operations_reduction_operations_argmin argmin()
//
// The \c argmin() function returns the index of the first smallest element of the given dense
// vector. This function can only be used for element types that support the smaller-than
// relationship. In case the given vector currently has a size of 0, the returned index is 0.
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const size_t minindex = argmin( a ); // Results in 1
\endcode
// \n \subsection vector_operations_reduction_operations_argmax argmax()
//
// The \c argmax() function returns the index of the first largest element of the given dense
// vector. This function can only be used for element types that support the smaller-than
// relationship. In case the given vector currently has a size of 0, the returned index is 0.
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const size_t maxindex = argmax( a ); // Results in 2
\endcode
// \n \section vector_operations_norms Norms
// <hr>
//
// \subsection vector_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = norm( a );
const double norm2 = norm( b );
\endcode
// \n \subsection vector_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = sqrNorm( a );
const double norm2 = sqrNorm( b );
\endcode
// \n \subsection vector_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l1Norm( a );
const double norm2 = l1Norm( b );
\endcode
// \n \subsection vector_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l2Norm( a );
const double norm2 = l2Norm( b );
\endcode
// \n \subsection vector_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l3Norm( a );
const double norm2 = l3Norm( b );
\endcode
// \n \subsection vector_operations_norms_l4norm l4Norm()
//
// The \c l4Norm() function computes the L4 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l4Norm( a );
const double norm2 = l4Norm( b );
\endcode
// \n \subsection vector_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector,
// where the norm is specified by either a compile time or a runtime argument:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = lpNorm<2>( a ); // Compile time argument
const double norm2 = lpNorm( b, 2.3 ); // Runtime argument
\endcode
// \n \subsection vector_operations_norms_maxnorm linfNorm() / maxNorm()
//
// The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given
// dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = linfNorm( a );
const double norm2 = maxNorm( b );
\endcode
// \n \subsection vector_operations_norms_minnorm minNorm()
//
// The \c minNorm() function computes the minimum norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm = minNorm( b );
\endcode
// \n \section vector_operations_scalar_expansion Scalar Expansion
// <hr>
//
// By means of the \c uniform() function it is possible to expand a scalar value into a dense,
// uniform vector. By default, the resulting uniform vector is a column vector, but it is possible
// to specify the transpose flag explicitly:
\code
using blaze::columnVector;
int scalar = 5;
blaze::DynamicVector<int,columnVector> v;
// ... Resizing and initialization
// Expansion of 'scalar' to a 3-dimensional uniform column vector
//
// ( 5 )
// ( 5 )
// ( 5 )
//
v = uniform( 3UL, scalar );
v = uniform<columnVector>( 3UL, scalar );
\endcode
// \n \section vector_operations_vector_expansion Vector Expansion
// <hr>
//
// Via the \c expand() function it is possible to convert a dense or sparse vector into a matrix.
// A column vector is expanded into a column-major matrix, a row vector is expanded into a
// row-major matrix. As demonstrated by the following examples, \c expand() can be used with both
// runtime and compile time parameters:
\code
blaze::DynamicVector<int,columnVector> a{ 1, 2, 3 };
blaze::CompressedVector<int,rowVector> b{ 1, 0, 3, 0, 5 };
// Expand the dense column vector ( 1 2 3 ) into a dense 3x5 column-major matrix
//
// ( 1 1 1 1 1 )
// ( 2 2 2 2 2 )
// ( 3 3 3 3 3 )
//
expand( a, 5 ); // Runtime parameter
expand<5>( a ); // Compile time parameter
// Expand the sparse row vector ( 1 0 3 0 5 ) into a sparse 3x5 row-major matrix
//
// ( 1 0 3 0 5 )
// ( 1 0 3 0 5 )
// ( 1 0 3 0 5 )
//
expand( b, 3 ); // Runtime parameter
expand<3>( b ); // Compile time parameter
\endcode
// \n \section vector_operations_vector_repetition Vector Repetition
// <hr>
//
// Via the \c repeat() function it is possible to repeat a dense or sparse vector multiple times
// to represent a larger vector. Repeating a column vector results in a column vector, repeating
// a row vector results in a row vector. As demonstrated by the following examples, \c repeat()
// can be used with both runtime and compile time parameters:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<int,columnVector> a1{ 1, 0, -2 };
blaze::CompressedVector<int,rowVector> b1{ 0, -1, 7 };
blaze::DynamicVector<int,columnVector> a2;
blaze::CompressedVector<int,rowVector> b2;
// ... Resizing and initialization
// Repeating the dense column vector ( 1 0 -2 ) three times results in
//
// ( 1 0 -2 1 0 -2 1 0 -2 )
//
a2 = repeat( a1, 3UL );
a2 = repeat<3UL>( a1 );
// Repeating the sparse row vector ( 0 -1 7 ) three times results in
//
// ( 0 -1 7 0 -1 7 0 -1 7 )
//
b2 = repeat( b1, 3UL );
b2 = repeat<3UL>( b1 );
\endcode
// \n \section vector_operations_statistic_operations Statistic Operations
// <hr>
//
// \subsection vector_operations_mean mean()
//
// The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or
// sparse vector can be computed via the \c mean() function. In case of a sparse vector, both the
// non-zero and zero elements are taken into account. The following example demonstrates the
// computation of the mean of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
const double m = mean( v ); // Results in 4.2 (i.e. 21/5)
\endcode
// In case the size of the given vector is 0, a \c std::invalid_argument is thrown.
//
// \n \subsection vector_operations_var var()
//
// The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse vector
// can be computed via the \c var() function. In case of a sparse vector, both the non-zero and
// zero elements are taken into account. The following example demonstrates the computation of
// the variance of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
   const double variance = var( v );  // Results in 5.7
\endcode
// In case the size of the given vector is smaller than 2, a \c std::invalid_argument is thrown.
//
// \n \subsection vector_operations_stddev stddev()
//
// The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a
// dense or sparse vector can be computed via the \c stddev() function. In case of a sparse
// vector, both the non-zero and zero elements are taken into account. The following example
// demonstrates the computation of the standard deviation of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
const double s = stddev( v ); // Results in 2.38747
\endcode
// In case the size of the given vector is smaller than 2, a \c std::invalid_argument is thrown.
//
//
// \n \section vector_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection vector_operations_declzero declzero()
//
// The \c declzero() operation can be used to explicitly declare any vector or vector expression
// as zero vector:
\code
blaze::DynamicVector<double> a, b;
// ... Resizing and initialization
b = declzero( a );
\endcode
// Any vector or vector expression that has been declared as zero vector via \c declzero() will
// gain all the benefits of a zero vector, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicVector;
DynamicVector<double> a, b, c;
// ... Resizing and initialization
isZero( declzero( a ) ); // Will always return true without runtime effort
c = declzero( a ) + b; // Declare the left operand of the vector addition as a
// zero vector, i.e. no addition needs to be performed
\endcode
// \warning The \c declzero() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-zero vector or
// vector expression as zero vector via the \c declzero() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \section vector_operations_vector_generators Vector Generators
// <hr>
//
// \subsection vector_operations_generate generate()
//
// The \c generate() function returns a dense vector filled elementwise via the given custom
// operation. By default, the returned vector is a column vector, but this setting can be changed
// via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is
// possible to specify the transpose flag explicitly.\n
// The following example demonstrates the use of the \c generate() function:
\code
using blaze::generate;
using blaze::columnVector;
using blaze::rowVector;
// Generates the homogeneous integer vector ( 2, 2, 2, 2, 2 )
blaze::DynamicVector<int,columnVector> a;
a = generate( 5UL, []( size_t index ){ return 2; } );
// Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 )
blaze::DynamicVector<float,columnVector> b;
b = generate( 4UL, []( size_t index ){ return 2.1F + 1.1F*index; } );
// Generates the logarithmically spaced double vector ( 1.0, 10.0, 100.0, 1000.0 )
blaze::DynamicVector<double,columnVector> c;
c = generate<columnVector>( 4UL, []( size_t index ){ return blaze::exp10( 1.0 + 1.0*index ); } );
// Generates the vector of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) )
using VT = blaze::StaticVector<int,2UL>;
blaze::StaticVector<VT,4UL,rowVector> d;
   d = generate<rowVector>( 4UL, []( size_t index ) { return evaluate( VT{ 1, 2 } + index ); } );
\endcode
// \n \subsection vector_operations_linspace linspace()
//
// The \c linspace() function returns a dense vector filled with linearly spaced elements. By
// default, the returned vector is a column vector, but this setting can be changed via the
// \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is possible
// to specify the transpose flag explicitly.\n
// The following example demonstrates the use of the \c linspace() function:
\code
using blaze::linspace;
using blaze::columnVector;
using blaze::rowVector;
// Generates the linearly spaced integer vector ( 2, 3, 4, 5, 6 )
blaze::DynamicVector<int,columnVector> a;
a = linspace( 5UL, 2, 6 );
// Generates the linearly spaced integer vector ( 6, 5, 4, 3, 2 )
blaze::DynamicVector<int,columnVector> b;
b = linspace<columnVector>( 5UL, 6, 2 );
// Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 )
blaze::DynamicVector<float,rowVector> c;
c = linspace<rowVector>( 4UL, 2.1F, 5.4F );
\endcode
// \n \subsection vector_operations_logspace logspace()
//
// The \c logspace() function returns a dense vector filled with logarithmically spaced elements.
// By default, the returned vector is a column vector, but this setting can be changed via the
// \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is possible
// to specify the transpose flag explicitly.\n
// The following example demonstrates the use of the \c logspace() function:
\code
using blaze::logspace;
using blaze::columnVector;
using blaze::rowVector;
   // Generates the logarithmically spaced integer vector ( 1, 10, 100, 1000 )
blaze::DynamicVector<int,columnVector> a;
a = logspace( 4UL, 0, 3 );
// Generates the logarithmically spaced double vector ( 1000.0, 100.0, 10.0, 1.0 )
blaze::DynamicVector<double,rowVector> b;
b = logspace<rowVector>( 4UL, 3.0, 0.0 );
\endcode
// \n \subsection vector_operations_uniform uniform()
//
// The \c uniform() function creates a uniform vector of the given size. By default, the
// resulting uniform vector is a column vector, but this setting can be changed via the
// \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is
// possible to specify the transpose flag explicitly.\n
// The following example demonstrates the use of the \c uniform() function:
\code
using blaze::uniform;
using blaze::columnVector;
using blaze::rowVector;
// Creates the uniform column vector ( 1, 1, 1, 1, 1 )
auto u1 = uniform( 5UL, 1 );
// Creates the uniform column vector ( 1.2, 1.2, 1.2 )
auto u2 = uniform<columnVector>( 3UL, 1.2 );
// Creates the uniform row vector ( 5U, 5U, 5U, 5U )
auto u3 = uniform<rowVector>( 4UL, 5U );
\endcode
// \n \subsection vector_operations_zero zero()
//
// The \c zero() function creates a zero vector of the given element type and size. By default,
// the resulting zero vector is a column vector, but this setting can be changed via the
// \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is
// possible to specify the transpose flag explicitly.\n
// The following example demonstrates the use of the \c zero() function:
\code
using blaze::zero;
using blaze::columnVector;
using blaze::rowVector;
// Creates the zero column vector ( 0, 0, 0, 0, 0 )
auto z1 = zero<int>( 5UL );
// Creates the zero column vector ( 0.0, 0.0, 0.0 )
auto z2 = zero<double,columnVector>( 3UL );
// Creates the zero row vector ( 0U, 0U, 0U, 0U )
auto z3 = zero<unsigned int,rowVector>( 4UL );
\endcode
// \n Previous: \ref vector_types Next: \ref matrices
*/
//*************************************************************************************************
//**Matrices***************************************************************************************
/*!\page matrices Matrices
//
// \tableofcontents
//
//
// \n \section matrices_general General Concepts
// <hr>
//
// The \b Blaze library currently offers five dense matrix types (\ref matrix_types_static_matrix,
// \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, \ref matrix_types_custom_matrix,
// and \ref matrix_types_uniform_matrix) and three sparse matrix types (\ref matrix_types_compressed_matrix,
// \ref matrix_types_identity_matrix, and \ref matrix_types_zero_matrix). All matrices can either
// be stored as row-major matrices or column-major matrices:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Setup of the 2x3 row-major dense matrix
//
// ( 1 2 3 )
// ( 4 5 6 )
//
DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 },
{ 4, 5, 6 } };
// Setup of the 3x2 column-major dense matrix
//
// ( 1 4 )
// ( 2 5 )
// ( 3 6 )
//
DynamicMatrix<int,columnMajor> B{ { 1, 4 },
{ 2, 5 },
{ 3, 6 } };
\endcode
// Per default, all matrices in \b Blaze are row-major matrices:
\code
// Instantiation of a 3x3 row-major matrix
blaze::DynamicMatrix<int> C( 3UL, 3UL );
\endcode
// \n \section matrices_details Matrix Details
// <hr>
//
// - \ref matrix_types
// - \ref matrix_operations
//
//
// \n \section matrices_examples Examples
// <hr>
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix
CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix
DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix
// ... Resizing and initialization
C = A * B;
\endcode
// \n Previous: \ref vector_operations Next: \ref matrix_types
*/
//*************************************************************************************************
//**Matrix Types***********************************************************************************
/*!\page matrix_types Matrix Types
//
// \tableofcontents
//
//
// \n \section matrix_types_dense_matrices Dense Matrices
// <hr>
//
// \subsection matrix_types_static_matrix StaticMatrix
//
// The blaze::StaticMatrix class template is the representation of a fixed size matrix with
// statically allocated elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/StaticMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the number of rows and columns, the storage order of the matrix,
// the alignment, the padding, and the group tag of the matrix can be specified via the seven
// template parameters:
\code
namespace blaze {
template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF, typename Tag >
class StaticMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c M : specifies the total number of rows of the matrix.
// - \c N : specifies the total number of columns of the matrix. Note that it is expected
// that StaticMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c AF : specifies whether the first element of every row/column is properly aligned with
// respect to the available instruction set (SSE, AVX, ...). Possible values are
// \c blaze::aligned and \c blaze::unaligned. The default value is
// \c blaze::defaultAlignmentFlag.
// - \c PF : specifies whether every row/column of the matrix should be padded to maximize the
// efficiency of vectorized operations. Possible values are \c blaze::padded and
// \c blaze::unpadded. The default value is \c blaze::defaultPaddingFlag.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are
// known at compile time:
\code
// Definition of a 3x4 integral row-major matrix
blaze::StaticMatrix<int,3UL,4UL> A;
// Definition of a 4x6 single precision row-major matrix
blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B;
// Definition of an unaligned, unpadded 6x4 double precision column-major matrix
blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C;
\endcode
// \subsubsection matrix_types_static_matrix_alignment Alignment
//
// In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticMatrix are possibly
// over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX,
// AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...)
// and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes
// for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic
// alignment:
\code
struct Int { int i; };
using MT1 = blaze::StaticMatrix<double,3UL,5UL>;
using MT2 = blaze::StaticMatrix<complex<float>,2UL,3UL>;
using MT3 = blaze::StaticMatrix<Int,5UL,4UL>;
alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( MT3 ); // Evaluates to 'alignof( Int )'
\endcode
// Note that an aligned blaze::StaticMatrix instance may be bigger than the sum of its data
// elements:
\code
sizeof( MT1 ); // Evaluates to 160 for SSE, and 192 for AVX and AVX-512
sizeof( MT2 ); // Evaluates to 64 for SSE and AVX and 128 for AVX-512
sizeof( MT3 ); // Evaluates to 80; no special alignment requirements
\endcode
// Please note that for this reason a blaze::StaticMatrix cannot be used in containers using
// dynamic memory such as \c std::vector without additionally providing an allocator that can
// provide over-aligned memory:
\code
using Type = blaze::StaticMatrix<double,3UL,5UL>;
using Allocator = blaze::AlignedAllocator<Type>;
std::vector<Type> v1; // Might be misaligned for AVX or AVX-512
std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512
\endcode
// \subsubsection matrix_types_static_matrix_padding Padding
//
// Adding padding elements to the end of every row or column of a blaze::StaticMatrix can have a
// significant impact on the performance. For instance, assuming that AVX is available, then two
// padded 3x3 matrices of double precision values can be added with three SIMD addition operations:
\code
using blaze::StaticMatrix;
using blaze::rowMajor;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
StaticMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1;
StaticMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2;
// ... Initialization
C1 = A1 + B1; // AVX-based matrix addition; maximum performance
C2 = A2 + B2; // Scalar matrix addition; limited performance
sizeof( A1 ); // Evaluates to 96 for SSE and AVX, and 192 for AVX-512
sizeof( A2 ); // Evaluates to 72 for SSE, AVX, and AVX-512 (minimum size)
\endcode
// Due to padding, the first addition will run at maximum performance. On the flip side, the size
// of each matrix instance is increased due to the padding elements. The total size of an instance
// depends on the number of elements and width of the available instruction set (16 bytes for
// SSE, 32 bytes for AVX, and 64 bytes for AVX-512).
//
// The second addition will be limited in performance since due to the number of elements some of
// the elements need to be handled in a scalar operation. However, the size of an \c unaligned,
// \c unpadded blaze::StaticMatrix instance is guaranteed to be the sum of its elements.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \subsection matrix_types_dynamic_matrix DynamicMatrix
//
// The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix
// with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included
// via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/DynamicMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the storage order, the type of the allocator, and the group tag of
// the matrix can be specified via the four template parameters:
\code
namespace blaze {
template< typename Type, bool SO, typename Alloc, typename Tag >
class DynamicMatrix;
} // namespace blaze
\endcode
// - \c Type : specifies the type of the matrix elements. DynamicMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Alloc: specifies the type of allocator used to allocate dynamic memory. The default type
// of allocator is \c blaze::AlignedAllocator.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best
// choice for medium to large matrices. The number of rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::DynamicMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::DynamicMatrix<double,blaze::columnMajor> C;
\endcode
// \subsubsection matrix_types_dynamic_matrix_allocators Allocators
//
// Via the third template parameter it is possible to customize the memory allocation of a
// \c blaze::DynamicMatrix. The provided allocator is expected to represent an implementation of
// the allocator concept of the standard library (see for instance
// <a href="https://en.cppreference.com/w/cpp/container/vector">std::vector</a> and
// <a href="https://en.cppreference.com/w/cpp/memory/allocator">std::allocator</a>). In
// addition, the provided allocator is also required to provide properly (over-)aligned memory
// for fundamental and complex numbers. For instance, in case SSE vectorization is possible, the
// returned memory must be at least 16-byte aligned. In case AVX is active, the memory must be at
// least 32-byte aligned, and in case of AVX-512 the memory must be even 64-byte aligned.
//
//
// \n \subsection matrix_types_hybrid_matrix HybridMatrix
//
// The HybridMatrix class template combines the flexibility of a dynamically sized matrix with
// the efficiency and performance of a fixed size matrix. It is implemented as a crossing between
// the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static
// matrix it uses static stack memory instead of dynamically allocated memory and similar to the
// dynamic matrix it can be resized (within the extend of the static memory). It can be included
// via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/HybridMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the maximum number of rows and columns, the storage order of the
// matrix, the alignment, the padding, and the group tag of the matrix can be specified via the
// seven template parameters:
\code
namespace blaze {
template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF, typename Tag >
class HybridMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. HybridMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c M : specifies the maximum number of rows of the matrix.
// - \c N : specifies the maximum number of columns of the matrix. Note that it is expected
// that HybridMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c AF : specifies whether the first element of every row/column is properly aligned with
// respect to the available instruction set (SSE, AVX, ...). Possible values are
// \c blaze::aligned and \c blaze::unaligned. The default value is
// \c blaze::defaultAlignmentFlag.
// - \c PF : specifies whether every row/column of the matrix should be padded to maximize the
// efficiency of vectorized operations. Possible values are \c blaze::padded and
// \c blaze::unpadded. The default value is \c blaze::defaultPaddingFlag.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions
// are not known at compile time or not fixed at runtime, but whose maximum dimensions are known
// at compile time:
\code
// Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8
blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16
blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL );
// Definition of an unaligned, unpadded 0x0 double precision column-major matrix with maximum dimensions of 6x6
blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C;
\endcode
// \subsubsection matrix_types_hybrid_matrix_alignment Alignment
//
// In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridMatrix are possibly
// over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX,
// AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...)
// and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes
// for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic
// alignment:
\code
struct Int { int i; };
using MT1 = blaze::HybridMatrix<double,3UL,5UL>;
using MT2 = blaze::HybridMatrix<complex<float>,2UL,3UL>;
using MT3 = blaze::HybridMatrix<Int,5UL,4UL>;
alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( MT3 ); // Evaluates to 'alignof( Int )'
\endcode
// Note that an aligned blaze::HybridMatrix instance may be bigger than an according unaligned
// blaze::HybridMatrix:
\code
sizeof( MT1 ); // Evaluates to 160 for SSE, 224 for AVX, and 256 for AVX-512
sizeof( MT2 ); // Evaluates to 80 for SSE, 96 for AVX, and 192 for AVX-512
sizeof( MT3 ); // Evaluates to 96; no special alignment requirements
\endcode
// Please note that for this reason a blaze::HybridMatrix cannot be used in containers using
// dynamic memory such as \c std::vector without additionally providing an allocator that can
// provide over-aligned memory:
\code
using Type = blaze::HybridMatrix<double,3UL,5UL>;
using Allocator = blaze::AlignedAllocator<Type>;
std::vector<Type> v1; // Might be misaligned for AVX or AVX-512
std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512
\endcode
// \subsubsection matrix_types_hybrid_matrix_padding Padding
//
// Adding padding elements to the end of every row or column of a blaze::HybridMatrix can have a
// significant impact on the performance. For instance, assuming that AVX is available, then two
// padded 3x3 matrices of double precision values can be added with three SIMD addition operations:
\code
using blaze::HybridMatrix;
using blaze::rowMajor;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
HybridMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1;
HybridMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2;
// ... Initialization
C1 = A1 + B1; // AVX-based matrix addition; maximum performance
C2 = A2 + B2; // Scalar matrix addition; limited performance
sizeof( A1 ); // Evaluates to 112 for SSE, 128 for AVX, and 256 for AVX-512
sizeof( A2 ); // Evaluates to 88 for SSE, AVX, and AVX-512 (minimum size)
\endcode
// Due to padding, the first addition will run at maximum performance. On the flip side, the size
// of each matrix instance is increased due to the padding elements. The total size of an instance
// depends on the number of elements and width of the available instruction set (16 bytes for
// SSE, 32 bytes for AVX, and 64 bytes for AVX-512).
//
// The second addition will be limited in performance since due to the number of elements some of
// the elements need to be handled in a scalar operation. However, the size of an \c unaligned,
// \c unpadded blaze::HybridMatrix instance is guaranteed to be the sum of its elements plus the
// necessary data members to store the current number of rows and columns.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \subsection matrix_types_custom_matrix CustomMatrix
//
// The blaze::CustomMatrix class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data
// structure. Thus in contrast to all other dense matrix types a custom matrix does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom matrix can therefore be considered an alias to the existing
// array. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/CustomMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the properties of the given array of elements, the storage order,
// and the group tag of the matrix can be specified via the following five template parameters:
\code
namespace blaze {
template< typename Type, AlignmentFlag AF, PaddingFlag PF, bool SO, typename Tag >
class CustomMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with
// any possibly cv-qualified, non-reference, non-pointer element type.
// - \c AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::aligned
// or \c blaze::unaligned).
// - \c PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::padded
// or \c blaze::unpadded).
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::CustomMatrix is the right choice if any external array needs to be represented as
// a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>;
std::vector<int> vec( 12UL );
UnalignedUnpadded A( &vec[0], 3UL, 4UL );
// Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays
using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>;
std::unique_ptr<float[]> memory1( new float[40] );
UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL );
// Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) );
AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL );
// Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>;
std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) );
AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL );
\endcode
// In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref matrix_types_custom_matrix_memory_management</b>
// -# <b>\ref matrix_types_custom_matrix_copy_operations</b>
// -# <b>\ref matrix_types_custom_matrix_alignment</b>
// -# <b>\ref matrix_types_custom_matrix_padding</b>
//
// \subsubsection matrix_types_custom_matrix_memory_management Memory Management
//
// The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// matrix data structure. However, this flexibility comes with the price that the user of a custom
// matrix is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom matrices:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom matrix!
std::vector<int> vec( 12UL );
CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL );
// Definition of a custom 8x12 matrix for an aligned and padded integer array of
// capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr
// must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) );
CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL );
\endcode
// \subsubsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:
\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomMatrix<int,unaligned,unpadded>;
std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A(0,1) = 20; // Also modifies the std::vector
CustomType B( A ); // Creating a copy of matrix A
B(0,2) = 20; // Also affects matrix A and the std::vector
\endcode
// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:
\code
std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A = C; // Copy assignment: Set all values of matrix A and B to 4.
\endcode
// \subsubsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) );
CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL );
\endcode
// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \subsubsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double
// precision matrices can be added via three SIMD addition operations:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomMatrix<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) );
// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( memory1.get(), 3UL, 3UL, 4UL );
CustomType B( memory2.get(), 3UL, 3UL, 4UL );
CustomType C( memory3.get(), 3UL, 3UL, 4UL );
// ... Initialization
C = A + B; // AVX-based matrix addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomMatrix<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) );
// Creating unpadded custom 3x3 matrix
CustomType A( memory1.get(), 3UL, 3UL );
CustomType B( memory2.get(), 3UL, 3UL );
CustomType C( memory3.get(), 3UL, 3UL );
// ... Initialization
C = A + B; // Scalar matrix addition
\endcode
// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding element and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore the elements with the downside that it is not possible to load a complete
// row to an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the SIMD
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater or equal the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \subsection matrix_types_uniform_matrix UniformMatrix
//
// The blaze::UniformMatrix class template is the representation of an arbitrary sized uniform
// matrix with elements of arbitrary type. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/UniformMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the storage order, and the group tag of the matrix can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool SO, typename Tag >
class UniformMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. UniformMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::UniformMatrix is the best choice for uniform matrices of any size. The number of
// rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::UniformMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::UniformMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::UniformMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_sparse_matrices Sparse Matrices
// <hr>
//
// \subsection matrix_types_compressed_matrix CompressedMatrix
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/CompressedMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the storage order, and the group tag of the matrix can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool SO, typename Tag >
class CompressedMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices:
\code
// Definition of a 3x4 integral row-major matrix
blaze::CompressedMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::CompressedMatrix<double,blaze::columnMajor> C;
\endcode
// \n \subsection matrix_types_identity_matrix IdentityMatrix
//
// The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary
// sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included
// via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/IdentityMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the storage order, and the group tag of the matrix can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool SO, typename Tag >
class IdentityMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. IdentityMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::IdentityMatrix is the perfect choice to represent an identity matrix:
\code
// Definition of a 3x3 integral row-major identity matrix
blaze::IdentityMatrix<int> A( 3UL );
// Definition of a 6x6 single precision row-major identity matrix
blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL );
// Definition of a double precision column-major identity matrix with 0 rows and columns
blaze::IdentityMatrix<double,blaze::columnMajor> C;
\endcode
// \n \subsection matrix_types_zero_matrix ZeroMatrix
//
// The blaze::ZeroMatrix class template is the representation of an immutable, arbitrary sized
// zero matrix with \f$ M \cdot N \f$ elements of arbitrary type. It can be included via the
// header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/ZeroMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the elements, the storage order, and the group tag of the matrix can be specified
// via the three template parameters:
\code
namespace blaze {
template< typename Type, bool SO, typename Tag >
class ZeroMatrix;
} // namespace blaze
\endcode
// - \c Type: specifies the type of the matrix elements. ZeroMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the
// matrix. The default value is \c blaze::defaultStorageOrder.
// - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0.
// See \ref grouping_tagging for details.
//
// The blaze::ZeroMatrix is the perfect choice to represent a zero matrix:
\code
// Definition of a 3x5 integral row-major zero matrix
blaze::ZeroMatrix<int> A( 3UL, 5UL );
// Definition of a 6x4 single precision row-major zero matrix
blaze::ZeroMatrix<float,blaze::rowMajor> B( 6UL, 4UL );
// Definition of a double precision column-major zero matrix with 0 rows and columns
blaze::ZeroMatrix<double,blaze::columnMajor> C;
\endcode
// \n Previous: \ref matrices Next: \ref matrix_operations
*/
//*************************************************************************************************
//**Matrix Operations******************************************************************************
/*!\page matrix_operations Matrix Operations
//
// \tableofcontents
//
//
// \n \section matrix_operations_constructors Constructors
// <hr>
//
// Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules
// to be aware of:
// - In case the last template parameter (the storage order) is omitted, the matrix is per
// default stored in row-major order.
// - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection matrix_operations_default_construction Default Construction
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
// All matrices can be default constructed. Whereas the size of
// a StaticMatrix is fixed via the second and third template
// parameter, the initial size of a constructed DynamicMatrix
// or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major
// matrix. All elements are initialized to 0.
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic
// row-major matrix with 0 rows and 0 columns.
DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic
// column-major matrix with 0 rows and 0 columns.
CompressedMatrix<int> M4; // Instantiation of a compressed integer
// row-major matrix of size 0x0.
CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision
// column-major matrix of size 0x0.
\endcode
// \n \subsection matrix_operations_size_construction Construction with Specific Size
//
// The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor
// that allows to immediately give the matrices a specific number of rows and columns:
\code
DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major
// matrix. The elements are not initialized.
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major
// matrix. The elements are not initialized.
CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed
// column-major matrix.
\endcode
// Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately
// allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this
// example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory.
//
//
// \n \subsection matrix_operations_initialization_constructors Initialization Constructors
//
// All dense matrix classes offer a constructor for a direct, homogeneous initialization of all
// matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements
// can be specified.
\code
StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major
// matrix. All elements are initialized to 7.
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major
// matrix. All elements are initialized to 2.0F.
CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major
// matrix with capacity for 4 non-zero elements.
\endcode
// \n \subsection matrix_operations_array_construction Array Construction
//
// Alternatively, all dense matrix classes offer a constructor for an initialization with a dynamic
// or static array, or with a \c std::array. If the matrix is initialized from a dynamic array, the
// constructor expects the dimensions of values provided by the array as first and second argument,
// the array as third argument. In case of a static array or \c std::array, the fixed size of the
// array is used:
\code
const std::unique_ptr<double[]> array1( new double[6] );
// ... Initialization of the dynamic array
blaze::DynamicMatrix<double> M12( 2UL, 3UL, array1.get() );
int array2[2][2] = { { 4, -5 }, { -6, 7 } };
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 );
const std::array<std::array<float,3UL>,2UL> array3{ { { 1, 2, 3 }, { 4, 5, 6 } } };
blaze::StaticMatrix<int,2UL,3UL> M14( array3 );
\endcode
// \n \subsection matrix_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse matrix classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicMatrix<float,columnMajor> M15{ { 3.1F, 6.4F },
{ -0.9F, -1.2F },
{ 4.8F, 0.6F } };
blaze::CompressedMatrix<int,rowMajor> M16{ { 3 },
{ 1 },
{ 0, 2 } };
\endcode
// Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix,
// \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are sized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values
// are initialized as default and in case the size of the top-level initializer list does not
// match the number of rows of the matrix or the size of any nested list exceeds the number of
// columns, a \c std::invalid_argument exception is thrown. In case of sparse matrices, only
// the non-zero elements are used to initialize the matrix.
//
// \n \subsection matrix_operations_copy_construction Copy Construction
//
// All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code
StaticMatrix<int,5UL,4UL,rowMajor> M17( M6 ); // Instantiation of the dense row-major matrix M17
// as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M18( M8 ); // Instantiation of the dense column-major matrix M18
// as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M19( M7 ); // Instantiation of the compressed column-major matrix
// M19 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M20( M8 ); // Instantiation of the compressed row-major matrix
// M20 as copy of the compressed column-major matrix M8.
\endcode
// Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different
// number of rows and/or columns:
\code
StaticMatrix<int,4UL,5UL,rowMajor> M21( M6 ); // Runtime error: Number of rows and columns
// does not match!
StaticMatrix<int,4UL,4UL,columnMajor> M22( M9 ); // Compile time error: Number of columns does
// not match!
\endcode
// \n \section matrix_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse matrices:
// \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment,
// \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment.
//
//
// \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment
//
// It is possible to assign the same value to all elements of a dense matrix. All dense matrix
// classes provide an according assignment operator:
\code
blaze::StaticMatrix<int,3UL,2UL> M1;
blaze::DynamicMatrix<double> M2;
// Setting all integer elements of the StaticMatrix to 4
M1 = 4;
// Setting all double precision elements of the DynamicMatrix to 3.5
M2 = 3.5;
\endcode
// \n \subsection matrix_operations_array_assignment Array Assignment
//
// Dense matrices can also be assigned a static array:
\code
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1;
blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2;
blaze::DynamicMatrix<double> M3;
int array1[2][2] = { { 1, 2 }, { 3, 4 } };
double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M1 = array1;
M2 = array1;
M3 = array2;
\endcode
// Note that the dimensions of the static array have to match the size of a \c StaticMatrix,
// whereas a \c DynamicMatrix is resized according to the array dimensions:
\f$ M3 = \left(\begin{array}{*{2}{c}}
3.1 & 6.4 \\
-0.9 & -1.2 \\
4.8 & 0.6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// matrix:
\code
blaze::DynamicMatrix<double> M1;
blaze::CompressedMatrix<int> M2;
M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } };
\endcode
// Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix,
// \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are resized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values
// are reset to their default value and in case the size of the top-level initializer list does
// not match the number of rows of the matrix or the size of any nested list exceeds the number
// of columns, a \c std::invalid_argument exception is thrown. In case of sparse matrices, only
// the non-zero elements are considered.
//
// \n \subsection matrix_operations_copy_assignment Copy Assignment
//
// All kinds of matrices can be assigned to each other. The only restriction is that since a
// \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of
// rows and in the number of columns.
\code
blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL );
blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL );
blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL );
blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL );
// ... Initialization of the matrices
M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix
M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix
M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix
M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix
\endcode
// \n \subsection matrix_operations_compound_assignment Compound Assignment
//
// Compound assignment is also available for matrices: addition assignment, subtraction assignment,
// and multiplication assignment. In contrast to plain assignment, however, the number of rows
// and columns of the two operands have to match according to the arithmetic operation.
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL );
blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL );
blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL );
blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5;
blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL );
// ... Initialization of the matrices
M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions
M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix
M1 += M4; // Runtime error: No compound assignment between matrices of different size
M1 -= M5; // Compilation error: No compound assignment between matrices of different size
M2 *= M6; // OK: Multiplication assignment between two row-major matrices
\endcode
// Note that the multiplication assignment potentially changes the number of columns of the
// target matrix:
\f$\left(\begin{array}{*{3}{c}}
2 & 0 & 1 \\
0 & 3 & 2 \\
\end{array}\right) \times
\left(\begin{array}{*{2}{c}}
4 & 0 \\
1 & 0 \\
0 & 3 \\
\end{array}\right) =
\left(\begin{array}{*{2}{c}}
8 & 3 \\
3 & 6 \\
\end{array}\right)\f$
// Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a
// multiplication assignment with other square matrices of the same dimensions.
//
//
// \n \section matrix_operations_element_access Element Access
// <hr>
//
// \subsection matrix_operations_function_call_operator_1 Function Call Operator
//
// The easiest way to access a specific dense or sparse matrix element is via the function call
// operator. The indices to access a matrix are zero-based:
\code
blaze::DynamicMatrix<int> M1( 4UL, 6UL );
M1(0,0) = 1;
M1(0,1) = 3;
// ...
blaze::CompressedMatrix<double> M2( 5UL, 3UL );
M2(0,2) = 4.1;
M2(1,1) = -6.3;
\endcode
// Since dense matrices allocate enough memory for all contained elements, using the function
// call operator on a dense matrix directly returns a reference to the accessed value. In case
// of a sparse matrix, if the accessed value is currently not contained in the matrix, the
// value is inserted into the matrix prior to returning a reference to the value, which can
// be much more expensive than the direct access to a dense matrix. Consider the following
// example:
\code
blaze::CompressedMatrix<int> M1( 4UL, 4UL );
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( size_t j=0UL; j<M1.columns(); ++j ) {
... = M1(i,j);
}
}
\endcode
// Although the compressed matrix is only used for read access within the for loop, using the
// function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore
// the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators.
//
// \n \subsection matrix_operations_iterators Iterators
//
// An alternate way to traverse the elements contained in a dense or sparse matrix is by means
// of iterators. For that purpose, all matrices provide the \c begin(), \c cbegin(), \c end(),
// and \c cend() member functions. Note that it is not possible to traverse all elements of the
// matrix, but that it is only possible to traverse elements in a row-wise fashion (in case of
// a row-major matrix) or in a column-wise fashion (in case of a column-major matrix). In case of
// non-const matrices, \c begin() and \c end() return an \c Iterator, which allows a manipulation
// of the (non-zero) value. In case of a constant matrix or in case \c cbegin() or \c cend() are
// used a \c ConstIterator is returned. Iterators on dense matrices traverse all elements of the
// matrix, including the zero elements. Iterators on sparse matrices only traverse the non-zero
// elements.
//
// The following two examples demonstrate how to traverse the elements of a dense and sparse
// matrix, respectively:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<int,rowMajor> M1( 4UL, 6UL );
DynamicMatrix<int,columnMajor> M2( 4UL, 6UL );
// Traversing all elements contained in the row-major matrix by Iterator
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( DynamicMatrix<int,rowMajor>::Iterator it=M1.begin(i); it!=M1.end(i); ++it ) {
*it = ...; // OK: Write access to the value of the element.
... = *it; // OK: Read access to the value of the element.
}
}
// Traversing all elements contained in the column-major matrix by ConstIterator
for( size_t j=0UL; j<M2.columns(); ++j ) {
for( DynamicMatrix<int,columnMajor>::ConstIterator it=M2.cbegin(j); it!=M2.cend(j); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the value of the element.
}
}
\endcode
\code
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
CompressedMatrix<int,rowMajor> M3( 4UL, 6UL );
CompressedMatrix<int,columnMajor> M4( 4UL, 6UL );
// Traversing the non-zero elements contained in the row-major matrix by Iterator
for( size_t i=0UL; i<M3.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=M3.begin(i); it!=M3.end(i); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
// Traversing the non-zero elements contained in the column-major matrix by ConstIterator
for( size_t j=0UL; j<M4.columns(); ++j ) {
for( CompressedMatrix<int,columnMajor>::ConstIterator it=M4.cbegin(j); it!=M4.cend(j); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( size_t i=0UL; i<M3.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=begin( M3, i ); it!=end( M3, i ); ++it ) {
// ...
}
}
for( size_t j=0UL; j<M4.columns(); ++j ) {
for( CompressedMatrix<int,columnMajor>::ConstIterator it=cbegin( M4, j ); it!=cend( M4, j ); ++it ) {
// ...
}
}
\endcode
// \n \subsection matrix_operations_data .data() / data()
//
// Sometimes it is necessary to acquire a pointer to the first element of the underlying array
// of a dense matrix. For that purpose the \c data() member function or the free \c data() function
// can be used:
\code
// Instantiating a dynamic matrix with 5 rows and 7 columns
blaze::DynamicMatrix<int> A( 5UL, 7UL );
A.data(); // Returns a pointer to the first element of the dynamic matrix
data( A ); // Same effect as the member function
\endcode
// Note that you can NOT assume that all matrix elements lie adjacent to each other! The dense
// matrix may use techniques such as padding to improve the alignment of the data. Whereas the
// number of elements within a row/column are given by the \ref matrix_operations_rows "rows()" and
// \ref matrix_operations_columns "columns()" functions, respectively, the total number of elements including
// padding is given by the \ref matrix_operations_spacing "spacing()" function.
//
//
// \n \section matrix_operations_element_insertion Element Insertion
// <hr>
//
// Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse
// matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements
// to the matrix.
//
// \n \subsection matrix_operations_function_call_operator_2 Function Call Operator
//
// The first possibility to add elements to a sparse matrix is the function call operator:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int> M1( 3UL, 4UL );
M1(1,2) = 9;
\endcode
// In case the element at the given position is not yet contained in the sparse matrix, it is
// automatically inserted. Otherwise the old value is replaced by the new value 9. The operator
// returns a reference to the sparse matrix element.
//
// \n \subsection matrix_operations_set .set()
//
// An alternative to the function call operator is the \c set() function: In case the element is
// not yet contained in the matrix the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at position (2,0)
M1.set( 2, 0, 1 );
\endcode
// \n \subsection matrix_operations_insert .insert()
// The insertion of elements can be better controlled via the \c insert() function. In contrast
// to the function call operator and the \c set() function it emits an exception in case the
// element is already contained in the matrix. In order to check for this case, the \c find()
// function can be used:
\code
// In case the element at position (2,3) is not yet contained in the matrix it is inserted
// with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) )
M1.insert( 2, 3, 4 );
\endcode
// \n \subsection matrix_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not
// suited for the setup of large sparse matrices. A very efficient, yet also very low-level
// way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to
// provide enough capacity to insert a new element in the specified row/column. Additionally,
// the index of the new element must be larger than the index of the previous element in the
// same row/column. Violating these conditions results in undefined behavior!
\code
M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0
M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1
M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2
// ...
\endcode
// The most efficient way to fill a sparse matrix with elements, however, is a combination of
// \c reserve(), \c append(), and the \c finalize() function:
\code
// Setup of the compressed row-major matrix
//
// ( 0 1 0 2 0 )
// A = ( 0 0 0 0 0 )
// ( 3 0 0 0 0 )
//
blaze::CompressedMatrix<int> M1( 3UL, 5UL );
M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements
M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1
M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3
M1.finalize( 0 ); // Finalizing row 0
M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2
M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0
M1.finalize( 2 ); // Finalizing row 2
\endcode
// \note The \c finalize() function has to be explicitly called for each row or column, even
// for empty ones!
// \note Although \c append() does not allocate new memory, it still invalidates all iterators
// returned by the \c end() functions!
//
//
// \n \section matrix_operations_element_removal Element Removal
// <hr>
//
// \subsection matrix_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse matrix. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Erasing the element at position (21,23)
A.erase( 21, 23 );
// Erasing a single element in row 17 via iterator
A.erase( 17, A.find( 17, 4 ) );
// Erasing all non-zero elements in the range [7..24] of row 33
A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
A.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) );
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) );
A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } );
\endcode
// \n \section matrix_operations_element_lookup Element Lookup
// <hr>
//
// A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever
// accessing a matrix element at a specific position a lookup operation is required. Whereas the
// function call operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection matrix_operations_find .find() / find()
//
// The \c find() function can be used to check whether a specific element is contained in the
// sparse matrix. It specifically searches for the element at the specified position. In case
// the element is found, the function returns an iterator to the element. Otherwise an iterator
// just past the last non-zero element of the according row or column (the \c end() iterator)
// is returned. Note that the returned iterator is subject to invalidation due to inserting
// operations via the function call operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the element at position (7,17). In case the element is not
// contained in the matrix, the end() iterator of row 7 is returned.
CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) );
if( pos != A.end( 7 ) ) {
// ...
}
\endcode
// Alternatively, the free function \c find() can be used to find a specific element in a sparse
// matrix:
\code
find( A, 7, 17 ); // Searching the element at position (7,17); same effect as the member function
\endcode
// \n \subsection matrix_operations_lowerbound .lowerBound() / lowerBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index not less than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index not less than the given row
// index. In combination with the \c upperBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of column index 17 in row 7.
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) );
// Searching the upper bound of column index 28 in row 7
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) );
// Erasing all elements in the specified range
A.erase( 7, pos1, pos2 );
\endcode
// Alternatively, the free function \c lowerBound() can be used:
\code
lowerBound( A, 7, 17 ); // Searching the lower bound of (7,17); same effect as the member function
\endcode
// \n \subsection matrix_operations_upperbound .upperBound() / upperBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index greater than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index greater than the given row
// index. In combination with the \c lowerBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,columnMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of row index 17 in column 9.
CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) );
// Searching the upper bound of row index 28 in column 9
CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) );
// Erasing all elements in the specified range
A.erase( 9, pos1, pos2 );
\endcode
// Alternatively, the free function \c upperBound() can be used:
\code
upperBound( A, 28, 9 ); // Searching the upper bound of (28,9); same effect as the member function
\endcode
// \n \section matrix_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection matrix_operations_rows .rows() / rows()
//
// The current number of rows of a matrix can be acquired via the \c rows() member function:
\code
// Instantiating a dynamic matrix with 10 rows and 8 columns
blaze::DynamicMatrix<int> M1( 10UL, 8UL );
M1.rows(); // Returns 10
// Instantiating a compressed matrix with 8 rows and 12 columns
blaze::CompressedMatrix<double> M2( 8UL, 12UL );
M2.rows(); // Returns 8
\endcode
// Alternatively, the free functions \c rows() can be used to query the current number of rows of
// a matrix. In contrast to the member function, the free function can also be used to query the
// number of rows of a matrix expression:
\code
rows( M1 ); // Returns 10, i.e. has the same effect as the member function
rows( M2 ); // Returns 8, i.e. has the same effect as the member function
rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix
\endcode
// \n \subsection matrix_operations_columns .columns() / columns()
//
// The current number of columns of a matrix can be acquired via the \c columns() member function:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
M1.columns(); // Returns 8
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
M2.columns(); // Returns 7
\endcode
// There is also a free function \c columns() available, which can also be used to query the number
// of columns of a matrix expression:
\code
columns( M1 ); // Returns 8, i.e. has the same effect as the member function
columns( M2 ); // Returns 7, i.e. has the same effect as the member function
columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix
\endcode
// \subsection matrix_operations_size size()
//
// The \c size() function returns the total number of elements of a matrix:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
size( M1 ); // Returns 48
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
size( M2 ); // Returns 56
\endcode
// \subsection matrix_operations_spacing .spacing() / spacing()
//
// The total number of elements of a row or column of a dense matrix, including potential padding
// elements, can be acquired via the \c spacing member function. In case of a row-major matrix
// (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing
// between two rows, in case of a column-major matrix (i.e. in case the storage flag is set to
// blaze::columnMajor) the function returns the spacing between two columns:
\code
// Instantiating a row-major dynamic matrix with 7 rows and 8 columns
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL );
M1.spacing(); // Returns the total number of elements in a row
// Instantiating a column-major dynamic matrix with 8 rows and 12 columns
blaze::DynamicMatrix<double,blaze::columnMajor> M2( 8UL, 12UL );
M2.spacing(); // Returns the total number of elements in a column
\endcode
// Alternatively, the free functions \c spacing() can be used to query the current number of
// elements in a row/column.
\code
spacing( M1 ); // Returns the total number of elements in a row
spacing( M2 ); // Returns the total number of elements in a column
\endcode
// \n \subsection matrix_operations_capacity .capacity() / capacity()
//
// The \c capacity() member function returns the internal capacity of a dense or sparse matrix.
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of
// a dense matrix the capacity will always be greater or equal than the total number of elements
// of the matrix. In case of a sparse matrix, the capacity will usually be much less than the
// total number of elements.
\code
blaze::DynamicMatrix<float> M1( 5UL, 7UL );
blaze::StaticMatrix<float,7UL,4UL> M2;
M1.capacity(); // Returns at least 35
M2.capacity(); // Returns at least 28
\endcode
// There is also a free function \c capacity() available to query the capacity. However, please
// note that this function cannot be used to query the capacity of a matrix expression:
\code
capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function
capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function
capacity( M1 * M2 ); // Compilation error!
\endcode
// \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse matrices the current number of non-zero elements can be queried
// via the \c nonZeros() member function. In case of matrices there are two flavors of the
// \c nonZeros() function: One returns the total number of non-zero elements in the matrix,
// the second returns the number of non-zero elements in a specific row (in case of a row-major
// matrix) or column (in case of a column-major matrix). Sparse matrices directly return their
// number of non-zero elements, dense matrices traverse their elements and count the number of
// non-zero elements.
\code
blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL );
// ... Initializing the dense matrix
M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix
M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2
\endcode
\code
blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL );
// ... Initializing the sparse matrix
M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix
M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in a
// matrix expression. However, the result is not the exact number of non-zero elements, but may be
// a rough estimation:
\code
nonZeros( M1 ); // Has the same effect as the member function
nonZeros( M1, 2 ); // Has the same effect as the member function
nonZeros( M2 ); // Has the same effect as the member function
nonZeros( M2, 3 ); // Has the same effect as the member function
nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression
\endcode
// \n \subsection matrix_operations_isempty isEmpty()
//
// The \c isEmpty() function returns whether the total number of elements of the matrix is zero:
\code
blaze::DynamicMatrix<int> A; // Create an empty matrix
isEmpty( A ); // Returns true
A.resize( 5, 0 ); // Resize to a 5x0 matrix
isEmpty( A ); // Returns true
A.resize( 5, 3 ); // Resize to a 5x3 matrix
isEmpty( A ); // Returns false
\endcode
// \n \subsection matrix_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number
// elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
// If at least one element of the matrix is not-a-number, the function returns \c true, otherwise
// it returns \c false.
//
//
// \n \subsection matrix_operations_isinf isinf()
//
// The \c isinf() function checks the given dense or sparse matrix for infinite (\c inf) elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isinf( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isinf( A ) ) { ... }
\endcode
// If at least one element of the matrix is infinite, the function returns \c true, otherwise it
// returns \c false.
//
//
// \n \subsection matrix_operations_isfinite isfinite()
//
// The \c isfinite() function checks if all elements of the given dense or sparse matrix are
// finite elements (i.e. normal, subnormal or zero elements, but not infinite or NaN):
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isfinite( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isfinite( A ) ) { ... }
\endcode
// If all elements of the matrix are finite, the function returns \c true, otherwise it returns
// \c false.
//
//
// \n \subsection matrix_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse matrix is in default state:
\code
blaze::HybridMatrix<int,5UL,4UL> A;
// ... Resizing and initialization
if( isDefault( A ) ) { ... }
\endcode
// A matrix is in default state if it appears to just have been default constructed. All resizable
// matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in
// default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all
// submatrices) is in default state if all its elements are in default state. For instance, in case
// the matrix is instantiated for a built-in integral or floating point data type, the function
// returns \c true in case all matrix elements are 0 and \c false in case any matrix element is
// not 0.
//
//
// \n \subsection matrix_operations_isSquare isSquare()
//
// Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the
// number of columns) can be checked via the \c isSquare() function:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
if( isSquare( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_issymmetric isSymmetric()
//
// Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix
// is symmetric:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isSymmetric( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be symmetric!
//
//
// \n \subsection matrix_operations_isUniform isUniform()
//
// In order to check if all matrix elements are identical, the \c isUniform() function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isUniform( A ) ) { ... }
\endcode
// Note that in case of a sparse matrix the zero elements are also taken into account!
//
//
// \n \subsection matrix_operations_isZero isZero()
//
// In order to check if all matrix elements are zero, the \c isZero() function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isZero( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_islower isLower()
//
// Via the \c isLower() function it is possible to check whether a dense or sparse matrix is
// lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower triangular!
//
//
// \n \subsection matrix_operations_isunilower isUniLower()
//
// Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is
// lower unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlylower isStrictlyLower()
//
// Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix
// is strictly lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly lower triangular!
//
//
// \n \subsection matrix_operations_isUpper isUpper()
//
// Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is
// upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper triangular!
//
//
// \n \subsection matrix_operations_isuniupper isUniUpper()
//
// Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is
// upper unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper()
//
// Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix
// is strictly upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly upper triangular!
//
//
// \n \subsection matrix_operations_isdiagonal isDiagonal()
//
// The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix,
// i.e. if it has only elements on its diagonal and if the non-diagonal elements are default
// elements:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isDiagonal( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be diagonal!
//
//
// \n \subsection matrix_operations_isidentity isIdentity()
//
// The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix,
// i.e. if all diagonal elements are 1 and all non-diagonal elements are 0:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isIdentity( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be identity matrices!
//
//
// \n \subsection matrix_operations_ispositivedefinite isPositiveDefinite()
//
// The \c isPositiveDefinite() function checks if the given dense matrix is positive definite.
\code
blaze::DynamicMatrix<double> A;
// ... Initialization
if( isPositiveDefinite( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be positive definite!
//
// \note The \c isPositiveDefinite() function can only be used for dense matrices with \c float,
// \c double, \c complex<float> or \c complex<double> element type. The attempt to call the
// function with matrices of any other element type or with a sparse matrix results in a compile
// time error!
//
// \note The function is depending on LAPACK kernels. Thus the function can only be used if a
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operations_matrix_trans trans()
//
// Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into
// a column-major matrix and vice versa:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL );
blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL );
M1 = M2; // Assigning a column-major matrix to a row-major matrix
M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1
M1 += trans( M2 ); // Addition assignment of two row-major matrices
\endcode
// \n \subsection matrix_operations_ctrans ctrans()
//
// The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian
// conjugate, or transjugate) can be computed via the \c ctrans() function:
\code
blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL );
blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL );
M1 = ctrans( M2 ); // Compute the conjugate transpose matrix
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix
M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix
\endcode
// \n \subsection matrix_operations_reverse reverse()
//
// Via the \c reverse() function it is possible to reverse the rows or columns of a dense or sparse
// matrix. The following examples give an impression of both alternatives:
\code
blaze::DynamicMatrix<int,rowMajor> A{ { 1, 0, 2, 3 },
{ 2, 4, 0, 1 },
{ 0, 3, 1, 0 } };
blaze::DynamicMatrix<int> B;
// Reversing the rows result in the matrix
//
// ( 0 3 1 0 )
// ( 2 4 0 1 )
// ( 1 0 2 3 )
//
B = reverse<rowwise>( A );
// Reversing the columns result in the matrix
//
// ( 3 2 0 1 )
// ( 1 0 4 2 )
// ( 0 1 3 0 )
//
B = reverse<columnwise>( A );
\endcode
// \n \subsection matrix_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given matrix expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a lower and a strictly lower dense
// matrix:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::StrictlyLowerMatrix;
LowerMatrix< DynamicMatrix<double> > A;
StrictlyLowerMatrix< DynamicMatrix<double> > B;
// ... Resizing and initialization
auto C = evaluate( A * B );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary matrix is created and no copy operation is performed. Instead, the result
// is directly written to the target matrix due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation
LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation
DynamicMatrix<double> F( A * B ); // Temporary & copy operation
D = evaluate( A * B ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicMatrix<double> A, B, C, D;
D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix
D = A + eval( B * C ); // No creation of a temporary matrix
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
// \n \subsection matrix_operations_noalias noalias()
//
// The \b Blaze library is able to reliably detect aliasing during the assignment of matrices.
// In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate
// temporary of the appropriate type to break the aliasing. For instance, in the following
// example \b Blaze performs an alias detection in both assignments, but only in the second
// assignment does it detect a problematic aliasing and use an intermediate temporary in order
// to be able to compute the correct result:
\code
blaze::DynamicMatrix<double> A, B;
A = A + B; // No problematic aliasing of A, no intermediate temporary is required.
A = A * B; // Problematic aliasing of A; intermediate temporary required!
\endcode
// The detection of aliasing effects, however, takes a small runtime effort. In order to disable
// the aliasing detection, the \c noalias() function can be used:
\code
blaze::DynamicMatrix<double> A, B;
A = noalias( A + B ); // No alias detection performed, no intermediate temporary.
A = noalias( A * B ); // No alias detection performed, no intermediate temporary.
// Note that the final result will be incorrect!
\endcode
// \warning The \c noalias() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Using \c noalias() in a situation
// where an aliasing effect occurs leads to undefined behavior (such as violated invariants
// or wrong computation results)!
//
// \n \subsection matrix_operations_nosimd nosimd()
//
// By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order
// to achieve maximum performance. However, via the \c nosimd() operation it is possible to disable
// the SIMD evaluation of any operation:
\code
blaze::DynamicMatrix<double> A, B;
A = nosimd( A + B ); // Disables SIMD for the matrix/matrix addition
A = nosimd( A * B ); // Disables SIMD for the matrix/matrix multiplication
\endcode
// Please note that the main purpose of the \c nosimd() operation is to enable an easy performance
// comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation
// will likely result in significantly reduced performance!
//
// \n \subsection matrix_operations_fix fix()
//
// By default, resizable matrices such as \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix
// can adapt their numbers of rows and columns during an assignment:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 } }; // Setup of a 1x2 matrix
   blaze::DynamicMatrix<int> B{ { 1, 2, 3 }, { 4, 5, 6 } };  // Setup of a 2x3 matrix
A = B; // Resizes matrix 'A' to a 2x3 matrix
\endcode
// Via the \c fix() operation it is possible to fix the size of a resizable matrix. If a matrix
// with a different number of rows or a different number of columns is assigned, instead of
// resizing the matrix the operation fails by throwing a \c std::invalid_argument exception:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 } }; // Setup of a 1x2 matrix
   blaze::DynamicMatrix<int> B{ { 1, 2, 3 }, { 4, 5, 6 } };  // Setup of a 2x3 matrix
fix( A ) = B; // Throws an exception: Matrix cannot be resized!
\endcode
// \n \section matrix_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection matrix_operations_resize_reserve .resize() / .reserve()
//
// The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template
// parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns
// of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<int,rowMajor> M1;
CompressedMatrix<int,columnMajor> M2( 3UL, 2UL );
// Adapting the number of rows and columns via the resize() function. The (optional)
// third parameter specifies whether the existing elements should be preserved. Per
// default, the existing elements are preserved.
M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type
// remain uninitialized, elements of class type are default
// constructed.
M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the
// new elements are NOT initialized!
M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved.
M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode
// Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices)
// on the matrix:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix
auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix
M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view
\endcode
// When the internal capacity of a matrix is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicMatrix<int> M1;
M1.reserve( 100 );
M1.rows(); // Returns 0
M1.capacity(); // Returns at least 100
\endcode
// Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or
// column (for a column-major matrix):
\code
blaze::CompressedMatrix<int> M1( 4UL, 6UL );
M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1
\endcode
// \n \subsection matrix_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of matrices with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix
M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved
M1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c rows() times
// \c columns(). Please also note that in case a reallocation occurs, all iterators (including
// \c end() iterators), all pointers and references to elements of this matrix are invalidated.
//
//
// \subsection matrix_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a dense or sparse matrix, the \c reset() function can be
// used. The number of rows and columns of the matrix are preserved:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements
M1.rows(); // Returns 4: size and capacity remain unchanged
\endcode
// Alternatively, only a single row or column of the matrix can be reset:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix
blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix
reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix
reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix
\endcode
// In order to reset a row of a column-major matrix or a column of a row-major matrix, use a
// row or column view (see \ref views_rows and \ref views_columns).
//
// In order to return a matrix to its default state (i.e. the state of a default constructed
// matrix), the \c clear() function can be used:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
clear( M1 ); // Resetting the entire matrix
M1.rows(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// \n \subsection matrix_operations_matrix_transpose transpose()
//
// In addition to the non-modifying \c trans() function, matrices can be transposed in-place via
// the \c transpose() function:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
transpose( M ); // In-place transpose operation.
M = trans( M ); // Same as above
\endcode
// Note however that the transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_ctranspose ctranspose()
//
// The \c ctranspose() function can be used to perform an in-place conjugate transpose operation:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
ctranspose( M ); // In-place conjugate transpose operation.
M = ctrans( M ); // Same as above
\endcode
// Note however that the conjugate transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two matrices
// of the same type:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL );
blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL );
swap( M1, M2 ); // Swapping the contents of M1 and M2
\endcode
// \n \section matrix_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection matrix_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single matrix, multiple matrices, and
// a matrix and a scalar.
//
// <b>Single Matrix</b>
//
// If passed a single matrix, the functions return the smallest and largest element of the given
// dense matrix or the smallest and largest non-zero element of the given sparse matrix,
// respectively:
\code
blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 },
{ -4, 0, 1 } };
min( A ); // Returns -5
max( A ); // Returns 7
\endcode
\code
blaze::CompressedMatrix<int> B{ { 1, 0, 3 },
{ 0, 0, 0 } };
min( B ); // Returns 1
max( B ); // Returns 3
\endcode
// For more information on the unary \c min() and \c max() reduction operations see the
// \ref matrix_operations_reduction_operations section.
//
// <b>Multiple Matrices</b>
//
// If passed two or more dense matrices, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given matrices, respectively:
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } };
min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 )
max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 )
\endcode
// Please note that sparse matrices can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a matrix expression:
\code
min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix
max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix
\endcode
// <b>Matrix and Scalar</b>
//
// If passed a dense matrix and a scalar, the \c min() and \c max() functions compute the
// componentwise minimum or maximum between the given matrix and a uniform matrix represented by
// the scalar value:
\code
   min( A, 0 );  // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 )
   min( 0, A );  // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 )
   max( A, 0 );  // Results in the matrix (  0, 2, 7 ) (  0, 0, 1 )
   max( 0, A );  // Results in the matrix (  0, 2, 7 ) (  0, 0, 1 )
\endcode
// \n \subsection matrix_operators_softmax softmax()
//
// The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called
// the normalized exponential function, of a given dense matrix can be computed via \c softmax().
// The resulting dense matrix consists of real values in the range (0..1], which add up to 1.
\code
blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 }
, { 4.0, 1.0, 2.0 }
, { 3.0, 4.0, 1.0 } };
blaze::StaticMatrix<double,3UL,3UL> B;
// Evaluating the softmax function
B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 )
// ( 0.316878 0.0157764 0.0428847 )
// ( 0.116573 0.316878 0.0157764 )
double b = sum( B ); // Results in 1
\endcode
// Alternatively it is possible to compute a row- or columnwise \c softmax() function. The
// resulting dense matrix consists of real values in the range (0..1], which add up to the number
// of rows or columns, respectively.
\code
using blaze::rowwise;
using blaze::columnwise;
blaze::StaticMatrix<double,3UL,3UL> C, D;
// Evaluating the rowwise softmax function
C = softmax<rowwise>( A ); // Results in ( 0.0900306 0.244728 0.665241 )
// ( 0.843795 0.0420101 0.114195 )
// ( 0.259496 0.705385 0.035119 )
double c = sum( C ); // Results in 3 (the number of rows of A)
// Evaluating the columnwise softmax function
D = softmax<columnwise>( A ); // Results in ( 0.035119 0.114195 0.665241 )
// ( 0.705385 0.0420101 0.244728 )
// ( 0.259496 0.843795 0.0900306 )
double d = sum( D ); // Results in 3 (the number of columns of A)
\endcode
// \n \subsection matrix_operators_trace trace()
//
// The \c trace() function sums the diagonal elements of a square dense or sparse matrix:
\code
blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 }
, { -4, -5, 6 }
, { 7, -8, -9 } };
trace( A ); // Returns the sum of the diagonal elements, i.e. -15
\endcode
// In case the given matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
//
// \n \subsection matrix_operations_matrix_determinant det()
//
// The determinant of a square dense matrix can be computed by means of the \c det() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
double d = det( A ); // Compute the determinant of A
\endcode
// In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
// \note The \c det() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function is depending on LAPACK kernels. Thus the function can only be used if a
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operators_rank rank()
//
// The \c rank() function computes the rank of a given dense matrix:
\code
blaze::DynamicMatrix<double> A( 5UL, 8UL );
// ... Initialization
rank( A );
\endcode
// The rank is determined as the number of singular values greater than a given tolerance. This
// tolerance is computed as
\code
tolerance = max(m,n) * max(s) * epsilon,
\endcode
// where \c m is the number of rows of the dense matrix, \c n is the number of columns of the
// dense matrix, \c max(s) is the maximum singular value of the dense matrix and \c epsilon is
// the difference between 1 and the least value greater than 1 that is representable by the
// floating point type of the singular values.
//
// \note The \c rank() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function is depending on LAPACK kernels. Thus the function can only be used if a
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 },
{ 4, -5, 6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_sign sign()
//
// The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For
// each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if
// \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of
// the \c sign() function
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 },
{ 4, 0, -6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
-1 & 1 & 0 \\
1 & 0 & -1 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a matrix, respectively:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = floor( A ); // Rounding down each element of the matrix
B = ceil ( A ); // Rounding up each element of the matrix
B = trunc( A ); // Truncating each element of the matrix
B = round( A ); // Rounding each element of the matrix
\endcode
// \n \subsection matrix_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse matrix to compute the complex
// conjugate of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Computing the matrix of conjugate values
// ( (1, 0) (-2, 1) )
// ( (1,-1) ( 0,-1) )
StaticMatrix<cplx,2UL,2UL> B;
B = conj( A );
\endcode
// Additionally, matrices can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicMatrix<cplx> C( 5UL, 2UL );
conjugate( C ); // In-place conjugate operation.
C = conj( C ); // Same as above
\endcode
// \n \subsection matrix_operators_real real()
//
// The \c real() function can be used on a dense or sparse matrix to extract the real part of
// each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the real part of each matrix element
// ( 1 -2 )
// ( 1 0 )
StaticMatrix<double,2UL,2UL> B;
B = real( A );
\endcode
// \n \subsection matrix_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part
// of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the imaginary part of each matrix element
// ( 0 -1 )
// ( 1 1 )
StaticMatrix<double,2UL,2UL> B;
B = imag( A );
\endcode
// \n \subsection matrix_operators_arg arg()
//
// The \c arg() function can be used on a dense or sparse matrix to compute the phase angle for
// each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Computing the phase angle of each matrix element
// ( 0.0 -2.67795 )
// ( 0.785398 1.5708 )
StaticMatrix<double,2UL,2UL> B;
B = arg( A );
\endcode
// \n \subsection matrix_operators_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// matrix can be computed:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
B = sqrt( A ); // Computes the square root of each element
C = invsqrt( A ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a matrix:
\code
blaze::DynamicMatrix<double> A, B, C;
B = cbrt( A ); // Computes the cubic root of each element
C = invcbrt( A ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
   C = hypot( A, B );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection matrix_operators_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a matrix to a specific range:
\code
blaze::DynamicMatrix<double> A, B;
B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a matrix.
// If passed a matrix and a numeric exponent, the function computes the exponential value of each
// element of the matrix using the same exponent. If passed a second matrix, the function computes
// the componentwise exponential value:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
C = pow( A, 1.2 ); // Computes the exponential value of each element
C = pow( A, B ); // Computes the componentwise exponential value
\endcode
// \n \subsection matrix_operators_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// matrix, respectively:
\code
blaze::HybridMatrix<double,3UL,3UL> A, B;
B = exp( A ); // Computes the base e exponential of each element
B = exp2( A ); // Computes the base 2 exponential of each element
B = exp10( A ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_log log() / log2() / log10() / log1p() / lgamma()
//
// The \c log(), \c log2(), \c log10(), \c log1p() and \c lgamma() functions can be used to
// compute the natural, binary and common logarithm of each element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = log( A ); // Computes the natural logarithm of each element
B = log2( A ); // Computes the binary logarithm of each element
B = log10( A ); // Computes the common logarithm of each element
B = log1p( A ); // Computes the natural logarithm of x+1 of each element
B = lgamma( A ); // Computes the natural logarithm of the absolute value of the gamma function
\endcode
// \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sin( A ); // Computes the sine of each element of the matrix
B = cos( A ); // Computes the cosine of each element of the matrix
B = tan( A ); // Computes the tangent of each element of the matrix
B = asin( A ); // Computes the inverse sine of each element of the matrix
B = acos( A ); // Computes the inverse cosine of each element of the matrix
B = atan( A ); // Computes the inverse tangent of each element of the matrix
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix
B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix
B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix
B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix
B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix
B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix
\endcode
// \n \subsection matrix_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense matrices:
\code
blaze::DynamicMatrix<double> A, B, C;
C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection matrix_operators_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = erf( A ); // Computes the error function of each element
B = erfc( A ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_map map() / forEach()
//
// Via the \c map() functions it is possible to execute componentwise custom operations on matrices.
// The unary \c map() function can be used to apply a custom operation on each element of a
// dense or sparse matrix. For instance, the following example demonstrates a custom square root
// computation via a lambda:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The N-ary \c map() functions can be used to apply an operation componentwise to the elements
// of N dense matrices (where \f$ N <= 6 \f$). The following example demonstrates the merging of
// two matrices of double precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( ( 2.1, 0.3) (-4.2, 1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used, but the function might be deprecated in future releases of \b Blaze.
//
//
// \n \subsection matrix_operations_select select()
//
// The \c select() function performs a componentwise, conditional selection of elements. Given
// the three dense matrices \c cond, \c A, and \c B, in case an element in the \c cond matrix
// evaluates to \c true, the according element of \a A is selected; in case the \a cond element
// evaluates to \c false, the according element of \a B is selected. The following example
// demonstrates the use of the \a select() function:
\code
blaze::DynamicMatrix<bool> cond{ { true, false }, { true, false } };
blaze::DynamicMatrix<int> A{ { 1, -1 }, { 1, -1 } };
blaze::DynamicMatrix<int> B{ { -2, 2 }, { -2, 2 } };
blaze::DynamicMatrix<int> C;
// ... Resizing and initialization
C = select( cond, A, B ); // Results in ( 1, 2 ) ( 1, 2 )
\endcode
// \n \section matrix_operations_reduction_operations Reduction Operations
// <hr>
//
// \subsection matrix_operations_reduction_operations_reduce reduce()
//
// The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise
// reduction of the elements of the given dense matrix or the non-zero elements of the given sparse
// matrix. The following examples demonstrate the total reduction of a dense and sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double totalsum1 = reduce( A, blaze::Add() );
const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::CompressedMatrix<double> A;
// ... Resizing and initialization
const double totalsum1 = reduce( A, blaze::Add() );
const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } );
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a
// column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the
// (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In
// case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise
// and the result is a column vector:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
blaze::DynamicVector<double,rowVector> colsum1, colsum2;
// ... Resizing and initialization
colsum1 = reduce<columnwise>( A, blaze::Add() );
colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
blaze::DynamicVector<double,columnVector> rowsum1, rowsum2;
// ... Resizing and initialization
rowsum1 = reduce<rowwise>( A, blaze::Add() );
rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } );
\endcode
// As demonstrated in the examples it is possible to pass any binary callable as custom reduction
// operation. However, for instance in the case of lambdas the vectorization of the reduction
// operation is compiler dependent and might not perform at peak performance. However, it is also
// possible to create vectorized custom operations. See \ref custom_operations for a detailed
// overview of the possibilities of custom operations.
//
// Please note that the evaluation order of the \c reduce() function is unspecified. Thus the
// behavior is non-deterministic if the given reduction operation is not associative or not
// commutative. Also, the operation is undefined if the given reduction operation modifies the
// values.
//
// \n \subsection matrix_operations_reduction_operations_sum sum()
//
// The \c sum() function reduces the elements of the given dense matrix or the non-zero elements
// of the given sparse matrix by means of addition:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalsum = sum( A ); // Results in 10
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalsum = sum( A ); // Results in 10
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a
// column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified,
// the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector.
// In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up
// row-wise and the result is a column vector:
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colsum1, colsum2;
colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 )
colsum2 = sum<columnwise>( B ); // Same result
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowsum1, rowsum2;
rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 )
rowsum2 = sum<rowwise>( B ); // Same result
\endcode
// Please note that the evaluation order of the \c sum() function is unspecified.
//
// \n \subsection matrix_operations_reduction_operations_prod prod()
//
// The \c prod() function reduces the elements of the given dense matrix or the non-zero elements
// of the given sparse matrix by means of multiplication:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalprod = prod( A ); // Results in 24
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalprod = prod( A ); // Results in 24
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a
// column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified,
// the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector.
// In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied
// row-wise and the result is a column vector:
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colprod1, colprod2;
colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 )
colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowprod1, rowprod2;
rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 )
rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 )
\endcode
// Please note that the evaluation order of the \c prod() function is unspecified.
//
// \n \subsection matrix_operations_reduction_operations_min min()
//
// The unary \c min() function returns the smallest element of the given dense matrix or the
// smallest non-zero element of the given sparse matrix. This function can only be used for
// element types that support the smaller-than relationship. In case the given matrix currently
// has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of
// fundamental data types).
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalmin = min( A ); // Results in 1
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } };
const int totalmin = min( A ); // Results in 1
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed matrix has only 2 non-zero elements.
// However, the minimum of this matrix is 1.
//
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the
// smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise
// is specified, the smallest (non-zero) element of each column is determined and the result is
// a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each
// row is determined and the result is a column vector.
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colmin1, colmin2;
colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 )
colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowmin1, rowmin2;
rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 )
rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 )
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account.
//
// \n \subsection matrix_operations_reduction_operations_max max()
//
// The unary \c max() function returns the largest element of the given dense matrix or the
// largest non-zero element of the given sparse matrix. This function can only be used for
// element types that support the smaller-than relationship. In case the given matrix currently
// has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of
// fundamental data types).
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalmax = max( A ); // Results in 4
\endcode
\code
blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } };
const int totalmax = max( A ); // Results in -1
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed matrix has only 2 non-zero elements.
// However, the maximum of this matrix is -1.
//
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the
// largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise
// is specified, the largest (non-zero) element of each column is determined and the result is
// a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each
// row is determined and the result is a column vector.
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } };
blaze::DynamicVector<int,rowVector> colmax1, colmax2;
colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 )
colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } };
blaze::DynamicVector<int,columnVector> rowmax1, rowmax2;
rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 )
rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 )
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account.
//
//
// \n \section matrix_operations_norms Norms
// <hr>
//
// \subsection matrix_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = norm( A );
const double norm2 = norm( B );
\endcode
// \n \subsection matrix_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = sqrNorm( A );
const double norm2 = sqrNorm( B );
\endcode
// \n \subsection matrix_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l1Norm( A );
const double norm2 = l1Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l2Norm( A );
const double norm2 = l2Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l3Norm( A );
const double norm2 = l3Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l4norm l4Norm()
//
// The \c l4Norm() function computes the L4 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l4Norm( A );
const double norm2 = l4Norm( B );
\endcode
// \n \subsection matrix_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix,
// where the norm is specified by either a compile time or a runtime argument:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = lpNorm<2>( A ); // Compile time argument
const double norm2 = lpNorm( B, 2.3 ); // Runtime argument
\endcode
// \n \subsection matrix_operations_norms_maxnorm linfNorm() / maxNorm()
//
// The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given
// dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = linfNorm( A );
const double norm2 = maxNorm( B );
\endcode
// \n \subsection matrix_operations_norms_minnorm minNorm()
//
// The \c minNorm() function computes the minimum norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm = minNorm( B );
\endcode
// \n \section matrix_operations_scalar_expansion Scalar Expansion
// <hr>
//
// By means of the \c uniform() function it is possible to expand a scalar value into a dense,
// uniform matrix. By default, the resulting uniform matrix is a row-major matrix, but it is
// possible to specify the storage order explicitly:
\code
using blaze::rowMajor;
int scalar = 5;
blaze::DynamicMatrix<int,rowMajor> A;
// ... Resizing and initialization
// Expansion of 'scalar' to a 3x5 row-major matrix
//
// ( 5 5 5 5 5 )
// ( 5 5 5 5 5 )
// ( 5 5 5 5 5 )
//
A = uniform( 3UL, 5UL, scalar );
A = uniform<columnMajor>( 3UL, 5UL, scalar );
\endcode
// \n \section matrix_operations_matrix_repetition Matrix Repetition
// <hr>
//
// Via the \c repeat() function it is possible to repeat a dense or sparse matrix multiple times
// to represent a larger matrix. Repeating a row-major matrix results in a row-major matrix,
// repeating a column-major matrix results in a column-major matrix. As demonstrated by the
// following examples, \c repeat() can be used with both runtime and compile time parameters:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<int,rowMajor> A1{ { 1, 0, -2 }, { 0, 5, 0 } };
blaze::CompressedMatrix<int,columnMajor> B1{ { 0, -1 }, { 0, 4 }, { 7, 0 } };
blaze::DynamicMatrix<int,rowMajor> A2;
blaze::CompressedMatrix<int,columnMajor> B2;
// ... Resizing and initialization
// Repeating the 2x3 dense row-major matrix 'A1' 2x rowwise and 3x columnwise results in
//
// ( 1 0 -2 1 0 -2 1 0 -2 )
// ( 0 5 0 0 5 0 0 5 0 )
// ( 1 0 -2 1 0 -2 1 0 -2 )
// ( 0 5 0 0 5 0 0 5 0 )
//
A2 = repeat( A1, 2UL, 3UL );
A2 = repeat<2UL,3UL>( A1 );
// Repeating the 3x2 sparse column-major matrix 'B1' 2x rowwise and 3x columnwise results in
//
// ( 0 -1 0 -1 0 -1 )
// ( 0 4 0 4 0 4 )
// ( 7 0 7 0 7 0 )
// ( 0 -1 0 -1 0 -1 )
// ( 0 4 0 4 0 4 )
// ( 7 0 7 0 7 0 )
//
B2 = repeat( B1, 2UL, 3UL );
B2 = repeat<2UL,3UL>( B1 );
\endcode
// \n \section matrix_operations_statistic_operations Statistic Operations
// <hr>
//
// \subsection matrix_operations_mean mean()
//
// The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or
// sparse matrix can be computed via the \c mean() function. In case of a sparse matrix, both the
// non-zero and zero elements are taken into account. The following example demonstrates the
// computation of the mean of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 }
, { 2, 6, 3, 1, 0 } };
const double m = mean( A ); // Results in 3.3 (i.e. 33/10)
\endcode
// In case the number of rows or columns of the given matrix is 0, a \c std::invalid_argument is
// thrown.
//
// Alternatively it is possible to compute the row- or columnwise mean:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 }
, { 2, 6, 3, 1, 0 } };
blaze::DynamicVector<double,columnVector> rm;
blaze::DynamicVector<double,rowVector> cm;
rm = mean<rowwise>( A ); // Results in ( 4.2 2.4 )
cm = mean<columnwise>( A ); // Results in ( 1.5 5.0 3.0 3.5 3.5 )
\endcode
// In case the rowwise mean is computed and the number of columns of the given matrix is 0 or
// in case the columnwise mean is computed and the number of rows of the given matrix is 0, a
// \c std::invalid_argument is thrown.
//
// \n \subsection matrix_operations_var var()
//
// The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse matrix
// can be computed via the \c var() function. In case of a sparse matrix, both the non-zero and
// zero elements are taken into account. The following example demonstrates the computation of
// the variance of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
const double v = var( A ); // Results in 6.5
\endcode
// In case the size of the given matrix is smaller than 2, a \c std::invalid_argument is thrown.
//
// Alternatively it is possible to compute the row- or columnwise variance:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
blaze::DynamicVector<double,columnVector> rv;
blaze::DynamicVector<double,rowVector> cv;
rv = var<rowwise>( A ); // Results in ( 1 4 9 )
cv = var<columnwise>( A ); // Results in ( 19 3 1 )
\endcode
// In case the rowwise variance is computed and the number of columns of the given matrix is
// smaller than 2 or in case the columnwise variance is computed and the number of rows of the
// given matrix is smaller than 2, a \c std::invalid_argument is thrown.
//
// \n \subsection matrix_operations_stddev stddev()
//
// The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a
// dense or sparse matrix can be computed via the \c stddev() function. In case of a sparse
// matrix, both the non-zero and zero elements are taken into account. The following example
// demonstrates the computation of the standard deviation of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
const double s = stddev( A ); // Results in sqrt(6.5)
\endcode
// In case the size of the given matrix is smaller than 2, a \c std::invalid_argument is thrown.
//
// Alternatively it is possible to compute the row- or columnwise standard deviation:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
blaze::DynamicVector<double,columnVector> rs;
blaze::DynamicVector<double,rowVector> cs;
rs = stddev<rowwise>( A ); // Results in ( 1 2 3 )
cs = stddev<columnwise>( A ); // Results in ( sqrt(19) sqrt(3) 1 )
\endcode
// In case the rowwise standard deviation is computed and the number of columns of the given
// matrix is smaller than 2 or in case the columnwise standard deviation is computed and the
// number of rows of the given matrix is smaller than 2, a \c std::invalid_argument is thrown.
//
//
// \n \section matrix_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection matrix_operations_declsym declsym()
//
// The \c declsym() operation can be used to explicitly declare any matrix or matrix expression
// as symmetric:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declsym( A );
\endcode
// Any matrix or matrix expression that has been declared as symmetric via \c declsym() will
// gain all the benefits of a symmetric matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
DynamicMatrix<double> A, B, C;
SymmetricMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isSymmetric( declsym( A ) ); // Will always return true without runtime effort
S = declsym( A ); // Omit any runtime check for symmetry
C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declsym() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-symmetric matrix or
// matrix expression as symmetric via the \c declsym() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declherm declherm()
//
// The \c declherm() operation can be used to explicitly declare any matrix or matrix expression
// as Hermitian:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declherm( A );
\endcode
// Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will
// gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
DynamicMatrix<double> A, B, C;
HermitianMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isHermitian( declherm( A ) ); // Will always return true without runtime effort
S = declherm( A ); // Omit any runtime check for Hermitian symmetry
C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declherm() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-Hermitian matrix or
// matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decllow decllow()
//
// The \c decllow() operation can be used to explicitly declare any matrix or matrix expression
// as lower triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decllow( A );
\endcode
// Any matrix or matrix expression that has been declared as lower triangular via \c decllow()
// will gain all the benefits of a lower triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
DynamicMatrix<double> A, B, C;
LowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isLower( decllow( A ) ); // Will always return true without runtime effort
L = decllow( A ); // Omit any runtime check for A being a lower matrix
C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decllow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-lower matrix or
// matrix expression as lower triangular via the \c decllow() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declunilow declunilow()
//
// The \c declunilow() operation can be used to explicitly declare any matrix or matrix expression
// as lower unitriangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declunilow( A );
\endcode
// Any matrix or matrix expression that has been declared as lower unitriangular via \c declunilow()
// will gain all the benefits of a lower unitriangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
DynamicMatrix<double> A, B, C;
UniLowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isUniLower( declunilow( A ) ); // Will always return true without runtime effort
L = declunilow( A ); // Omit any runtime check for A being an unilower matrix
C = declunilow( A * B ); // Declare the result of the matrix multiplication as lower
// unitriangular, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declunilow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-unilower matrix or
// matrix expression as lower unitriangular via the \c declunilow() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declstrlow declstrlow()
//
// The \c declstrlow() operation can be used to explicitly declare any matrix or matrix expression
// as strictly lower triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declstrlow( A );
\endcode
// Any matrix or matrix expression that has been declared as strictly lower triangular via
// \c declstrlow() will gain all the benefits of a strictly lower triangular matrix, which range
// from reduced runtime checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::StrictlyLowerMatrix;
DynamicMatrix<double> A, B, C;
StrictlyLowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isStrictlyLower( declstrlow( A ) ); // Will always return true without runtime effort
L = declstrlow( A ); // Omit any runtime check for A being a strictly lower matrix
C = declstrlow( A * B ); // Declare the result of the matrix multiplication as strictly lower
// triangular, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declstrlow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-strictly-lower matrix
// or matrix expression as strictly lower triangular via the \c declstrlow() operation leads to
// undefined behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declupp declupp()
//
// The \c declupp() operation can be used to explicitly declare any matrix or matrix expression
// as upper triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declupp( A );
\endcode
// Any matrix or matrix expression that has been declared as upper triangular via \c declupp()
// will gain all the benefits of an upper triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UpperMatrix;
DynamicMatrix<double> A, B, C;
UpperMatrix< DynamicMatrix<double> > U;
// ... Resizing and initialization
isUpper( declupp( A ) ); // Will always return true without runtime effort
U = declupp( A ); // Omit any runtime check for A being an upper matrix
C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-upper matrix or
// matrix expression as upper triangular via the \c declupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decluniupp decluniupp()
//
// The \c decluniupp() operation can be used to explicitly declare any matrix or matrix expression
// as upper unitriangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decluniupp( A );
\endcode
// Any matrix or matrix expression that has been declared as upper unitriangular via \c decluniupp()
// will gain all the benefits of an upper unitriangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UniUpperMatrix;
DynamicMatrix<double> A, B, C;
UniUpperMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isUniUpper( decluniupp( A ) ); // Will always return true without runtime effort
L = decluniupp( A ); // Omit any runtime check for A being an uniupper matrix
C = decluniupp( A * B ); // Declare the result of the matrix multiplication as upper
// unitriangular, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decluniupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-uniupper matrix or
// matrix expression as upper unitriangular via the \c decluniupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declstrupp declstrupp()
//
// The \c declstrupp() operation can be used to explicitly declare any matrix or matrix expression
// as strictly upper triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declstrupp( A );
\endcode
// Any matrix or matrix expression that has been declared as strictly upper triangular via
// \c declstrupp() will gain all the benefits of a strictly upper triangular matrix, which range
// from reduced runtime checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::StrictlyUpperMatrix;
DynamicMatrix<double> A, B, C;
StrictlyUpperMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isStrictlyUpper( declstrupp( A ) ); // Will always return true without runtime effort
L = declstrupp( A ); // Omit any runtime check for A being a strictly upper matrix
C = declstrupp( A * B ); // Declare the result of the matrix multiplication as strictly upper
// triangular, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declstrupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-strictly-upper matrix
// or matrix expression as strictly upper triangular via the \c declstrupp() operation leads to
// undefined behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decldiag decldiag()
//
// The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression
// as diagonal:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decldiag( A );
\endcode
// Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will
// gain all the benefits of a diagonal matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isDiagonal( decldiag( A ) ); // Will always return true without runtime effort
D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix
C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decldiag() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-diagonal matrix
// or matrix expression as diagonal via the \c decldiag() operation leads to undefined
// behavior (which can result in violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declid declid()
//
// The \c declid() operation can be used to explicitly declare any matrix or matrix expression
// as identity matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declid( A );
\endcode
// Any matrix or matrix expression that has been declared as identity matrix via \c declid() will
// gain all the benefits of an identity matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isIdentity( declid( A ) ); // Will always return true without runtime effort
D = declid( A ); // Omit any runtime check for A being an identity matrix
C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an
// identity matrix, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declid() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-identity matrix
// or matrix expression as identity matrix via the \c declid() operation leads to undefined
// behavior (which can result in violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declzero declzero()
//
// The \c declzero() operation can be used to explicitly declare any matrix or matrix expression
// as zero matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declzero( A );
\endcode
// Any matrix or matrix expression that has been declared as zero matrix via \c declzero() will
// gain all the benefits of a zero matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
isZero( declzero( A ) ); // Will always return true without runtime effort
C = declzero( A ) + B; // Declare the left operand of the matrix addition as a
// zero matrix, i.e. no addition needs to be performed
\endcode
// \warning The \c declzero() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-zero matrix or
// matrix expression as zero matrix via the \c declzero() operation leads to undefined behavior
// (which can result in violated invariants or wrong computation results)!
//
//
// \n \section matrix_operations_matrix_generators Matrix Generators
// <hr>
//
// \subsection matrix_operations_generate generate()
//
// The \c generate() function returns a dense matrix filled elementwise via the given custom
// binary operation. By default, the returned matrix is a row-major matrix, but this setting can
// be changed via the \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively
// it is possible to specify the storage order explicitly.\n
// The following example demonstrates the use of the \c generate() function:
\code
using blaze::generate;
using blaze::rowMajor;
using blaze::columnMajor;
// Generates the uniform integer matrix ( ( 2, 2, 2 ), ( 2, 2, 2 ) )
blaze::DynamicMatrix<int,rowMajor> A;
A = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2; } );
// Generates the linearly spaced float matrix ( ( 2.1, 3.2, 4.3 ), ( 5.4, 6.5, 7.6 ) )
blaze::DynamicMatrix<float,rowMajor> B;
B = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2.1F + 1.1F*(i*3UL+j); } );
// Generates the logarithmically spaced double matrix ( ( 1.0, 10.0 ), ( 100.0, 1000.0 ) )
blaze::DynamicMatrix<double,rowMajor> C;
C = generate<rowMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return blaze::exp10( 1.0 + 1.0*(i*2UL+j) ); } );
// Generates the matrix of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) )
using VT = blaze::StaticVector<int,2UL>;
blaze::DynamicMatrix<VT,columnMajor> D;
D = generate<columnMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return evaluate( VT{ 1, 2 } + (i*2UL+j) ); } );
\endcode
// \n \subsection matrix_operations_uniform uniform()
//
// The \c uniform() function creates a uniform matrix of the given size. By default, the
// resulting uniform matrix is a row-major matrix, but this setting can be changed via the
// \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively it is
// possible to specify the storage order explicitly.\n
// The following example demonstrates the use of the \c uniform() function:
\code
using blaze::uniform;
using blaze::rowMajor;
using blaze::columnMajor;
// Creates the uniform row-major matrix
// ( 1, 1, 1, 1, 1 )
// ( 1, 1, 1, 1, 1 )
auto U1 = uniform( 2UL, 5UL, 1 );
// Creates the uniform row-major matrix
// ( 1.2, 1.2 )
// ( 1.2, 1.2 )
// ( 1.2, 1.2 )
auto U2 = uniform<rowMajor>( 3UL, 2UL, 1.2 );
// Creates the uniform column-major matrix
// ( 5U, 5U, 5U, 5U, 5U, 5U, 5U )
// ( 5U, 5U, 5U, 5U, 5U, 5U, 5U )
auto U3 = uniform<columnMajor>( 2UL, 7UL, 5U );
\endcode
// \n \subsection matrix_operations_zero zero()
//
// The \c zero() function creates a zero matrix of the given element type and size. By default,
// the resulting zero matrix is a row-major matrix, but this setting can be changed via the
// \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively it is possible
// to specify the storage order explicitly.\n
// The following example demonstrates the use of the \c zero() function:
\code
using blaze::zero;
using blaze::rowMajor;
using blaze::columnMajor;
// Creates the row-major zero matrix
// ( 0, 0, 0, 0, 0 )
// ( 0, 0, 0, 0, 0 )
auto Z1 = zero<int>( 2UL, 5UL );
// Creates the row-major zero matrix
// ( 0.0, 0.0 )
// ( 0.0, 0.0 )
// ( 0.0, 0.0 )
auto Z2 = zero<double,rowMajor>( 3UL, 2UL );
// Creates the column-major zero matrix
// ( 0U, 0U, 0U, 0U, 0U, 0U, 0U )
// ( 0U, 0U, 0U, 0U, 0U, 0U, 0U )
auto Z3 = zero<unsigned int,columnMajor>( 2UL, 7UL );
\endcode
// \n \section matrix_operations_matrix_inversion Matrix Inversion
// <hr>
//
// The inverse of a square dense matrix can be computed via the \c inv() function:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = inv( A ); // Compute the inverse of A
\endcode
// Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert()
// function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
invert( A ); // In-place matrix inversion
\endcode
// Both the \c inv() and the \c invert() functions will automatically select the most suited matrix
// inversion algorithm depending on the size and type of the given matrix. For small matrices of
// up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices
// larger than 6x6 the inversion is performed by means of the most suited matrix decomposition
// method: In case of a general matrix the LU decomposition is used, for symmetric matrices the
// LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and
// for triangular matrices the inverse is computed via a forward or back substitution.
//
// In case the type of the matrix does not provide additional compile time information about its
// structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually
// by means of \ref matrix_operations_declaration_operations when calling the \c invert() function:
\code
invert( declsym( A ) ); // In-place inversion of a symmetric matrix
invert( declherm( A ) ); // In-place inversion of an Hermitian matrix
invert( decllow( A ) ); // In-place inversion of a lower triangular matrix
invert( declunilow( A ) ); // In-place inversion of a lower unitriangular matrix
invert( declupp( A ) ); // In-place inversion of an upper triangular matrix
invert( decluniupp( A ) ); // In-place inversion of an upper unitriangular matrix
invert( decldiag( A ) ); // In-place inversion of a diagonal matrix
\endcode
// Alternatively, via the \c invert() function it is possible to explicitly specify the inversion
// algorithm:
\code
using blaze::byLU;
using blaze::byLDLT;
using blaze::byLDLH;
using blaze::byLLH;
// In-place inversion of a general matrix by means of an LU decomposition
invert<byLU>( A );
// In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLT>( A );
// In-place inversion of an Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLH>( A );
// In-place inversion of a positive definite matrix by means of a Cholesky decomposition
invert<byLLH>( A );
\endcode
// Whereas the inversion by means of an LU decomposition works for every general square matrix,
// the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is
// restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works
// for Hermitian positive definite matrices. Please note that it is in the responsibility of the
// function caller to guarantee that the selected algorithm is suited for the given matrix. In
// case this precondition is violated the result can be wrong and might not represent the inverse
// of the given matrix!
//
// For both the \c inv() and \c invert() function the matrix inversion fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The matrix inversion can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c inv() function. Also, it is not possible to access individual elements via the function call
// operator on the expression object:
\code
row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The inversion functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the matrix may already have been modified.
//
//
// \n \section matrix_operations_matrix_exponential Matrix Exponential
// <hr>
//
// The matrix exponential of a \f$N \times N\f$ matrix \f$ X \f$ is defined as
\f[ e^X = \sum\limits_{k=0}^\infty \frac{1}{k!} X^k. \f]
// In order to compute the matrix exponential of a square dense matrix, the \c matexp() function
// can be used:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = matexp( A ); // Compute the exponential of A
\endcode
// \note The matrix exponential can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type results in a compile time error!
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c matexp() function. Also, it is not possible to access individual elements via the function
// call operator on the expression object:
\code
row( matexp( A ), 2UL ); // Compilation error: Views cannot be used on a matexp() expression!
matexp( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \n \section matrix_operations_decomposition Matrix Decomposition
// <hr>
//
// \note All decomposition functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \subsection matrix_operations_decomposition_lu LU Decomposition
//
// The LU decomposition of a dense matrix can be computed via the \c lu() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a row-major matrix
assert( A == L * U * P );
\endcode
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a column-major matrix
assert( A == P * L * U );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the
// three matrices \c A, \c L and \c U are required to have the same storage order. Also, please
// note that the way the permutation matrix \c P needs to be applied differs between row-major and
// column-major matrices, since the algorithm uses column interchanges for row-major matrices and
// row interchanges for column-major matrices.
//
// Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates
// the LU decomposition of a symmetric matrix into a lower and upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U;
blaze::DynamicMatrix<double,blaze::columnMajor> P;
lu( A, L, U, P ); // LU decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition
//
// The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
llh( A, L ); // LLH decomposition of a row-major matrix
assert( A == L * ctrans( L ) );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A
// and \c L can have any storage order.
//
// Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates
// the LLH decomposition of a symmetric matrix into a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
llh( A, L ); // Cholesky decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_qr QR Decomposition
//
// The QR decomposition of a dense matrix can be computed via the \c qr() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
blaze::DynamicMatrix<double,blaze::rowMajor> R;
qr( A, Q, R ); // QR decomposition of a row-major matrix
assert( A == Q * R );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c R can have any storage order.
//
// Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates
// the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R;
qr( A, Q, R ); // QR decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_rq RQ Decomposition
//
// Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via
// the \c rq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> R;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
rq( A, R, Q ); // RQ decomposition of a row-major matrix
assert( A == R * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c R and \c Q can have any storage order.
//
// Also the \c rq() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the RQ decomposition of an Hermitian matrix into a general
// matrix and an upper triangular matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
rq( A, R, Q ); // RQ decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_ql QL Decomposition
//
// The QL decomposition of a dense matrix can be computed via the \c ql() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::DynamicMatrix<double,blaze::columnMajor> L;
ql( A, Q, L ); // QL decomposition of a row-major matrix
assert( A == Q * L );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c L can have any storage order.
//
// Also the \c ql() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the QL decomposition of a symmetric matrix into a general
// matrix and a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
ql( A, Q, L ); // QL decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_lq LQ Decomposition
//
// The LQ decomposition of a dense matrix can be computed via the \c lq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
lq( A, L, Q ); // LQ decomposition of a row-major matrix
assert( A == L * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c L and \c Q can have any storage order.
//
// Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates
// the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
lq( A, L, Q ); // LQ decomposition of A
\endcode
// \n \section matrix_operations_linear_systems Linear Systems
// <hr>
//
// The \c solve() function computes a solution for the given dense linear system of equations (LSE)
// \f$ A*x=b \f$, where \c A is the given system matrix, \c x is the solution vector, and \c b is
// the given dense right-hand side vector:
\code
blaze::DynamicMatrix<double> A; // The square general system matrix
blaze::DynamicVector<double> b; // The right-hand side vector
// ... Resizing and initialization
blaze::DynamicVector<double> x; // The solution vector
solve( A, x, b ); // Computing the solution x
x = solve( A, b ); // Alternative syntax
\endcode
// Alternatively, \c solve() computes a solution for the given dense LSE \f$ A*X=B \f$, where \c A
// is the given dense system matrix, the columns of \c X are the solution vectors, and the columns
// of \c B are the given right-hand side vectors:
\code
blaze::DynamicMatrix<double> A; // The square general system matrix
blaze::DynamicMatrix<double> B; // The right-hand side matrix
// ... Resizing and initialization
blaze::DynamicMatrix<double> X; // The solution matrix
solve( A, X, B ); // Computing the solutions X
X = solve( A, B ); // Alternative syntax
\endcode
// Both \c solve() functions will automatically select the most suited direct solver algorithm
// depending on the size and type of the given system matrix. For small matrices of up to 6x6,
// both functions use manually optimized kernels for maximum performance. For matrices larger
// than 6x6 the computation is performed by means of the most suited LAPACK solver method (see
// \ref lapack_linear_system_solver).
//
// In case the type of the matrix does not provide additional compile time information about
// its structure (symmetric, lower, upper, diagonal, ...), the information can be provided
// manually by means of \ref matrix_operations_declaration_operations when calling the \c solve()
// functions:
\code
blaze::DynamicMatrix<double> A; // The square lower system matrix
blaze::DynamicVector<double> b; // The right-hand side vector
// ... Resizing and initialization
blaze::DynamicVector<double> x; // The solution vector
solve( declsym( A ), x, b ); // Solving the LSE with a symmetric system matrix
solve( declherm( A ), x, b ); // Solving the LSE with an Hermitian system matrix
solve( decllow( A ), x, b ); // Solving the LSE with a lower system matrix
solve( declunilow( A ), x, b ); // Solving the LSE with an unilower system matrix
solve( declupp( A ), x, b ); // Solving the LSE with an upper system matrix
solve( decluniupp( A ), x, b ); // Solving the LSE with an uniupper system matrix
solve( decldiag( A ), x, b ); // Solving the LSE with a diagonal system matrix
\endcode
// For both \c solve() functions the computation fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the size of the right-hand side vector doesn't match the dimensions of the system matrix;
// - ... the number of rows of the right-hand side matrix doesn't match the dimensions of the system matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The \c solve() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions may make use of LAPACK kernels. Thus the functions can only be used if a
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error will
// be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// two-argument \c solve() function. Also, it is not possible to access individual elements via
// the function call operator on the expression object:
\code
row( solve( A, b ), 2UL ); // Compilation error: Views cannot be used on a solve() expression!
solve( A, b )[2]; // Compilation error: It is not possible to access individual elements!
rows( solve( A, B ), { 2UL, 4UL } ); // Compilation error: Views cannot be used on a solve() expression!
solve( A, B )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The \c solve() functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the solution vector or matrix may already have been modified.
//
//
// \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors
// <hr>
//
// The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions.
// The following examples give an impression of the computation of eigenvalues and eigenvectors
// for a general, a symmetric, and an Hermitian matrix:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A
// ... Initialization
DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
w = eigen( A ); // Computing only the eigenvalues of A (one argument)
eigen( A, w ); // Computing only the eigenvalues of A (two arguments)
eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments)
\endcode
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL ); // The symmetric matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
w = eigen( A ); // Computing only the eigenvalues of A (one argument)
eigen( A, w ); // Computing only the eigenvalues of A (two arguments)
eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments)
\endcode
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL ); // The Hermitian matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
w = eigen( A ); // Computing only the eigenvalues of A (one argument)
eigen( A, w ); // Computing only the eigenvalues of A (two arguments)
eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments)
\endcode
// The one- and two-argument functions compute only the eigenvalues of the given \a n-by-\a n
// matrix, the three-argument function additionally computes the eigenvectors. The eigenvalues
// are returned in the given vector \a w and the eigenvectors are returned in the given matrix
// \a V, which are both resized to the correct dimensions (if possible and necessary).
//
// Depending on the given matrix type, the resulting eigenvalues are either of floating point
// or complex type: In case the given matrix is either a compile time symmetric matrix with
// floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues
// will be of floating point type and therefore the elements of the given eigenvalue vector are
// expected to be of floating point type. In all other cases they are expected to be of complex
// type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except
// that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having
// the positive imaginary part first.
//
// In case \a A is a row-major matrix, \a V will contain the left eigenvectors, otherwise \a V
// will contain the right eigenvectors. In case \a V is a row-major matrix the eigenvectors are
// returned in the rows of \a V, in case \a V is a column-major matrix the eigenvectors are
// returned in the columns of \a V. In case the given matrix is a compile time symmetric matrix
// with floating point elements, the resulting eigenvectors will be of floating point type and
// therefore the elements of the given eigenvector matrix are expected to be of floating point
// type. In all other cases they are expected to be of complex type.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// In all failure cases an exception is thrown.
//
// \note All \c eigen() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of
// LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available
// and linked to the executable. Otherwise a linker error will be created.
//
//
// \n \section matrix_operations_singularvalues Singular Values/Singular Vectors
// <hr>
//
// The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd()
// functions. The following two examples give an impression of the computation of singular values
// and singular vectors for a general dense matrix with \c double and \c complex<double> element
// type, respectively:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors
s = svd( A ); // (1) Computing only the singular values of A
svd( A, s ); // (2) Computing only the singular values of A
svd( A, U, s, V ); // (3) Computing the singular values and vectors of A
svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the half-open floating point range (0.0..1.0]
svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2]
\endcode
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors
s = svd( A ); // (1) Computing only the singular values of A
svd( A, s ); // (2) Computing only the singular values of A
svd( A, U, s, V ); // (3) Computing the singular values and vectors of A
svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the half-open floating point range (0.0..1.0]
svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2]
\endcode
// Functions (1), (2) and (4) compute only singular values of the given general \a m-by-\a n
// matrix, functions (3) and (5) additionally compute singular vectors. The resulting singular
// values are returned in the given vector \a s, the left singular vectors are returned in the
// given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, \a U,
// and \a V are resized to the correct dimensions (if possible and necessary).
//
// Functions (4) and (5) allow for the specification of a subset of singular values and/or
// vectors. The number of singular values and vectors to be computed is specified by the lower
// bound \a low and the upper bound \a upp, which either form an integral or a floating point
// range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// In all failure cases an exception is thrown.
//
// \note All \c svd() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the singular values and/or singular vectors of a dense matrix by
// means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is
// available and linked to the executable. Otherwise a linker error will be created.
//
//
// \n Previous: \ref matrix_types Next: \ref adaptors
*/
//*************************************************************************************************
//**Adaptors***************************************************************************************
/*!\page adaptors Adaptors
//
// \tableofcontents
//
//
// \section adaptors_general General Concepts
// <hr>
//
// Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the
// matrices such that certain invariants are preserved. Due to this adaptors can provide a compile
// time guarantee of certain properties, which can be exploited for optimized performance.
//
// The \b Blaze library provides a total of 9 different adaptors:
//
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices
// <ul>
// <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_lowermatrix </li>
// <li> \ref adaptors_triangular_matrices_unilowermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_uppermatrix </li>
// <li> \ref adaptors_triangular_matrices_uniuppermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Diagonal Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_diagonalmatrix </li>
// </ul>
// </li>
// </ul>
// </li>
// </ul>
//
// In combination with the general matrix types, \b Blaze provides a total of 40 different matrix
// types that make it possible to exactly adapt the type of matrix to every specific problem.
//
//
// \n \section adaptors_examples Examples
// <hr>
//
// The following code examples give an impression on the use of adaptors. The first example shows
// the multiplication between two lower matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant
// performance advantage in comparison to a general matrix multiplication, especially for large
// matrices.
//
// The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse
// vector multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which significantly increases the performance.
//
// \n Previous: \ref matrix_operations Next: \ref adaptors_symmetric_matrices
*/
//*************************************************************************************************
//**Symmetric Matrices*****************************************************************************
/*!\page adaptors_symmetric_matrices Symmetric Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_symmetric_matrices_general Symmetric Matrices
// <hr>
//
// In contrast to general matrices, which have no restriction in their number of rows and columns
// and whose elements can have any value, symmetric matrices provide the compile time guarantee
// to be square matrices with pair-wise identical values. Mathematically, this means that a
// symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal
// values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can
// be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze
// library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix
// class template.
//
//
// \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix
// <hr>
//
// The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it
// by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its
// transpose \f$ A = A^T \f$). It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/SymmetricMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
namespace blaze {
template< typename MT >
class SymmetricMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible symmetric matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense symmetric matrix with static memory
blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense symmetric matrix based on HybridMatrix
blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix
blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision symmetric matrix
blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E;
\endcode
// The storage order of a symmetric matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices
// <hr>
//
// A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the symmetry constraint:
//
// -# <b>\ref adaptors_symmetric_matrices_square</b>
// -# <b>\ref adaptors_symmetric_matrices_symmetry</b>
// -# <b>\ref adaptors_symmetric_matrices_initialization</b>
//
// \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 symmetric static matrix
SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
//
// This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are
// symmetric themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, row-major 3x3 symmetric compressed matrix
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
// Initializing three elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
*A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a symmetric dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-symmetric dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; symmetric invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a symmetric sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Setup of the symmetric matrix
//
// ( 0 1 3 )
// A = ( 1 2 0 )
// ( 3 0 0 )
//
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0)
A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1)
A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2)
\endcode
// The symmetry property is also enforced for symmetric custom matrices: In case the given array
// of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::SymmetricMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 symmetric custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomSymmetric A( array, 3UL ); // OK
// Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomSymmetric B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the
// symmetric matrix. The following example demonstrates that modifying the elements of an entire
// row of the symmetric matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of the symmetric matrix
//
// ( 0 1 0 2 )
// A = ( 1 3 4 0 )
// ( 0 4 0 5 )
// ( 2 0 5 0 )
//
SymmetricMatrix< DynamicMatrix<int> > A( 4 );
A(0,1) = 1;
A(0,3) = 2;
A(1,1) = 3;
A(1,2) = 4;
A(2,3) = 5;
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( 0 0 0 2 )
// A = ( 0 0 0 0 )
// ( 0 0 0 5 )
// ( 2 0 5 0 )
//
row( A, 1 ) = 0;
\endcode
// The next example demonstrates the (compound) assignment to submatrices of symmetric matrices.
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the symmetric property of
// dense symmetric matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A SymmetricMatrix matrix can participate in numerical operations in any way any other dense
// or sparse matrix can participate. It can also be combined with any other dense or sparse vector
// or matrix. The following code example gives an impression of the use of SymmetricMatrix within
// arithmetic operations:
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix
// to be assigned is not symmetric at compile time, a runtime check is performed.
//
//
// \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices
// <hr>
//
// It is also possible to use symmetric block matrices:
\code
using blaze::CompressedMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
// Definition of a 3x3 symmetric block matrix based on CompressedMatrix
SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 );
\endcode
// Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and
// guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also
// applied to element \f$ a_{ji} \f$:
\code
// Inserting the elements (2,4) and (4,2)
A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } } );
// Manipulating the elements (2,4) and (4,2)
A(2,4)(1,1) = -5;
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_symmetric_matrices_performance Performance Considerations
// <hr>
//
// When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. The \b Blaze library
// tries to exploit the properties of symmetric matrices whenever possible. However, there are
// also situations when using a symmetric matrix introduces some overhead. The following examples
// demonstrate several situations where symmetric matrices can positively or negatively impact
// performance.
//
// \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
SymmetricMatrix< CompressedMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the
// SymmetricMatrix adapter is obviously an advantage.
//
// \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL );
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a symmetric matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not symmetric at compile time:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the symmetric matrix; no performance penalty
C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead
C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead
\endcode
// When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary
// to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property
// of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two symmetric matrices does not necessarily result in another symmetric matrix:
\code
SymmetricMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a symmetric matrix; no runtime overhead
C = A - B; // Results in a symmetric matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors Next: \ref adaptors_hermitian_matrices
*/
//*************************************************************************************************
//**Hermitian Matrices*****************************************************************************
/*!\page adaptors_hermitian_matrices Hermitian Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_hermitian_matrices_general Hermitian Matrices
// <hr>
//
// In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices.
// Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise
// conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal
// to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have
// a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze
// library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix
// class template.
//
//
// \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix
// <hr>
//
// The HermitianMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to
// its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/HermitianMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
namespace blaze {
template< typename MT >
class HermitianMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible Hermitian matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense Hermitian matrix with static memory
blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix
blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix
blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C;
// Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix
blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision complex Hermitian matrix
blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E;
\endcode
// The storage order of an Hermitian matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices
//
// The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits.
// However, there are a couple of differences, both from a mathematical point of view as well as
// from an implementation point of view.
//
// From a mathematical point of view, a matrix is called symmetric when it is equal to its
// transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate
// transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two
// conditions coincide, which means that symmetric matrices of real values are also Hermitian
// and Hermitian matrices of real values are also symmetric.
//
// From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data
// types (i.e. all integral types except \c bool, floating point and complex types), whereas
// symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according
// SymmetricMatrix implementation. For complex element types, however, the Hermitian property
// is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::HermitianMatrix;
using blaze::SymmetricMatrix;
// The following two matrices provide an identical experience (including performance)
HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric
SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric
// The following two matrices will behave differently
HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian
SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric
// Hermitian block matrices are not allowed
HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error!
SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix
\endcode
// \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices
// <hr>
//
// An Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the Hermitian symmetry constraint:
//
// -# <b>\ref adaptors_hermitian_matrices_square</b>
// -# <b>\ref adaptors_hermitian_matrices_hermitian</b>
// -# <b>\ref adaptors_hermitian_matrices_initialization</b>
//
// \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 Hermitian static matrix
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced!
//
// This means that the following properties of an Hermitian matrix are always guaranteed:
//
// - The diagonal elements are real numbers, i.e. the imaginary part is zero
// - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$
//
// Thus modifying the element \f$ a_{ij} \f$ of an Hermitian matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that
// are Hermitian themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Default constructed, row-major 3x3 Hermitian compressed matrix
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
// Initializing the matrix via the function call operator
//
// ( (1, 0) (0,0) (2,1) )
// ( (0, 0) (0,0) (0,0) )
// ( (2,-1) (0,0) (0,0) )
//
A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0)
A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
//
// ( (1, 0) (0,0) (2, 1) )
// ( (0, 0) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
//
// ( (1, 0) (8,1) (2, 1) )
// ( (8,-1) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
*A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
//
// ( (0, 0) (8,1) (0, 0) )
// ( (8,-1) (2,0) (4,-2) )
// ( (0, 0) (4,2) (0, 0) )
//
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from an Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) },
{ cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) },
{ cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK
// Assignment of a non-Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) },
{ cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) },
{ cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
C = D; // Throws an exception; Hermitian invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up an Hermitian sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,2) (3,-4) )
// A = ( (1,-2) (2,0) (0, 0) )
// ( (3, 4) (0,0) (0, 0) )
//
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0)
A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1)
A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2)
\endcode
// The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
// of elements does not represent an Hermitian matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::HermitianMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 Hermitian custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomHermitian A( array, 3UL ); // OK
// Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomHermitian B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
// Hermitian matrix. The following example demonstrates that modifying the elements of an entire
// row of the Hermitian matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,-1) (0,0) (2, 1) )
// A = ( (1, 1) (3, 0) (4,2) (0, 0) )
// ( (0, 0) (4,-2) (0,0) (5,-3) )
// ( (2,-1) (0, 0) (5,3) (0, 0) )
//
HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
A(0,1) = cplx( 1.0, -1.0 );
A(0,3) = cplx( 2.0, 1.0 );
A(1,1) = cplx( 3.0, 0.0 );
A(1,2) = cplx( 4.0, 2.0 );
A(2,3) = cplx( 5.0, -3.0 );
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( (0, 0) (0,0) (0,0) (2, 1) )
// A = ( (0, 0) (0,0) (0,0) (0, 0) )
// ( (0, 0) (0,0) (0,0) (5,-3) )
// ( (2,-1) (0,0) (5,3) (0, 0) )
//
row( A, 1 ) = cplx( 0.0, 0.0 );
\endcode
// The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
// Since the modification of element \f$ a_{ij} \f$ of an Hermitian matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
// symmetry of the matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of two default 4x4 Hermitian matrices
HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( (1,-1) (2, 5) )
// B = ( (3, 0) (4,-6) )
// ( (5, 0) (6, 0) )
//
DynamicMatrix<cplx> B( 3UL, 2UL );
B(0,0) = cplx( 1.0, -1.0 );
B(0,1) = cplx( 2.0, 5.0 );
B(1,0) = cplx( 3.0, 0.0 );
B(1,1) = cplx( 4.0, -6.0 );
B(2,0) = cplx( 5.0, 0.0 );
B(2,1) = cplx( 6.0, 0.0 );
// OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
//
// ( (0, 0) (0, 0) (1,-1) (2, 5) )
// A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
// ( (1, 1) (3, 0) (5, 0) (6, 0) )
// ( (2,-5) (4, 6) (6, 0) (0, 0) )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( (0, 0) (1,-1) (2,5) (0,0) )
// A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
// ( (2,-5) (X, X) (6,0) (0,0) )
// ( (0, 0) (0, 0) (0,0) (0,0) )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the Hermitian property of
// dense Hermitian matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// An HermitianMatrix can be used within all numerical operations in any way any other dense or
// sparse matrix can be used. It can also be combined with any other dense or sparse vector or
// matrix. The following code example gives an impression of the use of HermitianMatrix within
// arithmetic operations:
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
using cplx = complex<float>;
DynamicMatrix<cplx,rowMajor> A( 3, 3 );
CompressedMatrix<cplx,rowMajor> B( 3, 3 );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to an Hermitian matrix. In case the matrix
// to be assigned is not Hermitian at compile time, a runtime check is performed.
//
//
// \n \section adaptors_hermitian_matrices_performance Performance Considerations
// <hr>
//
// When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. This is particularly
// true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The
// \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever
// possible. However, there are also situations when using an Hermitian matrix introduces some
// overhead. The following examples demonstrate several situations where Hermitian matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric
HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a
// symmetric matrix is obviously an advantage.
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using an Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using an Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
HermitianMatrix< DynamicMatrix< complex<double> > > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the Hermitian matrix; no performance penalty
C = A; // Assignment of an Hermitian matrix to another Hermitian matrix; no runtime overhead
C = B; // Assignment of a general matrix to an Hermitian matrix; some runtime overhead
\endcode
// When assigning a general, potentially not Hermitian matrix to an Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:
\code
HermitianMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an Hermitian matrix; no runtime overhead
C = A - B; // Results in an Hermitian matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in an Hermitian matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors_symmetric_matrices Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************
//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
// - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
// - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices
// - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/LowerMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class LowerMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense lower matrix with static memory
blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense lower matrix based on HybridMatrix
blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense lower matrix based on DynamicMatrix
blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense lower matrix based on CustomMatrix
blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision lower matrix
blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
\endcode
// The storage order of a lower matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 1 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/UniLowerMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class UniLowerMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower unitriangular matrices:
\code
// Definition of a 3x3 row-major dense unilower matrix with static memory
blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense unilower matrix based on HybridMatrix
blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision unilower matrix
blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a lower unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 0 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/StrictlyLowerMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class StrictlyLowerMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly lower matrix with static memory
blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly lower matrix
blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly lower triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & u_{2,2} & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & u_{N,N} \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/UpperMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class UpperMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:
\code
// Definition of a 3x3 row-major dense upper matrix with static memory
blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense upper matrix based on HybridMatrix
blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense upper matrix based on DynamicMatrix
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision upper matrix
blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 1 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 1 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/UniUpperMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class UniUpperMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:
\code
// Definition of a 3x3 row-major dense uniupper matrix with static memory
blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision uniupper matrix
blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 0 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 0 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/StrictlyUpperMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class StrictlyUpperMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly upper matrix with static memory
blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly upper matrix
blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly upper triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):
\f[\left(\begin{array}{*{5}{c}}
d_{0,0} & 0 & 0 & \cdots & 0 \\
0 & d_{1,1} & 0 & \cdots & 0 \\
0 & 0 & d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & d_{N,N} \\
\end{array}\right).\f]
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/DiagonalMatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
namespace blaze {
template< typename MT >
class DiagonalMatrix;
} // namespace blaze
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:
\code
// Definition of a 3x3 row-major dense diagonal matrix with static memory
blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision diagonal matrix
blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a diagonal matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
// -# <b>\ref adaptors_triangular_matrices_square</b>
// -# <b>\ref adaptors_triangular_matrices_triangular</b>
// -# <b>\ref adaptors_triangular_matrices_initialization</b>
// -# <b>\ref adaptors_triangular_matrices_storage</b>
// -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 lower dynamic matrix
LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 lower static matrix
LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the
// modification of diagonal elements. Also, triangular matrices can only be assigned matrices that
// don't violate their triangular property. The following example demonstrates this restriction
// by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >;
// Default constructed, row-major 3x3 lower compressed matrix
CompressedLower A( 3 );
// Initializing elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(2,0) = 2.0; // Initialization of the lower element (2,0)
A(1,2) = 9.0; // Throws an exception; invalid modification of upper element
// Inserting two more elements via the insert() function
A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0)
A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1)
A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element
// Appending an element via the append() function
A.reserve( 1, 3 ); // Reserving enough capacity in row 1
A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1)
A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part
// Access via a non-const iterator
CompressedLower::Iterator it = A.begin(1);
*it = 6.0; // Modifies the lower element (1,0)
++it;
*it = 9.0; // Modifies the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 2, 0 ); // Erasing the lower element (2,0)
// Construction from a lower dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-lower dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; lower matrix invariant would be violated!
\endcode
// The triangular property is also enforced during the construction of triangular custom matrices:
// In case the given array of elements does not represent the according triangular matrix type, a
// \c std::invalid_argument exception is thrown:
\code
using blaze::CustomMatrix;
using blaze::LowerMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 lower custom matrix from a properly initialized array
double array[9] = { 1.0, 0.0, 0.0,
2.0, 3.0, 0.0,
4.0, 5.0, 6.0 };
CustomLower A( array, 3UL ); // OK
// Attempt to create a second 3x3 lower custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomLower B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...)
// on the triangular matrix. The following example demonstrates that modifying the elements of an
// entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements.
// Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
// Setup of the lower matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 0 3 0 0 )
// ( 4 0 5 0 )
//
LowerMatrix< DynamicMatrix<int> > A( 4 );
A(1,0) = 1;
A(1,1) = 2;
A(2,1) = 3;
A(3,0) = 4;
A(3,2) = 5;
// Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 9 9 9 0 )
// ( 4 0 5 0 )
//
row( A, 2 ) = 9;
// Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
//
// ( 0 0 0 0 )
// A = ( 1 7 0 0 )
// ( 9 7 7 0 )
// ( 4 7 7 0 )
//
submatrix( A, 0, 1, 4, 2 ) = 7;
\endcode
// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowVector;
// Setup of two default 4x4 lower matrices
LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of a 4-dimensional vector
//
// v = ( 1 2 3 0 )
//
DynamicVector<int,rowVector> v{ 1, 2, 3, 0 };
// OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
//
// ( 0 0 0 0 )
// A1 = ( 0 0 0 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 2 ) = v; // OK
// Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
// marked with X cannot be assigned and triggers an exception.
//
// ( 0 0 0 0 )
// A1 = ( 1 2 X 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 1 ) = v; // Assignment throws an exception!
// Setup of the 3x2 dynamic matrix
//
// ( 0 0 )
// B = ( 7 0 )
// ( 8 9 )
//
DynamicMatrix<int> B( 3UL, 2UL, 0 );
B(1,0) = 7;
B(2,0) = 8;
B(2,1) = 9;
// OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
//
// ( 0 0 0 0 )
// A2 = ( 0 7 0 0 )
// ( 0 8 9 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
// preserved! The elements marked with X cannot be assigned without violating the invariant!
//
// ( 0 0 0 0 )
// A2 = ( 0 0 X 0 )
// ( 0 0 8 X )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix this initialization is important since otherwise the lower/upper matrix property
// of dense lower matrices would not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// 5x5 row-major lower dynamic matrix with default initialized upper matrix
LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
// 7x7 column-major upper dynamic matrix with default initialized lower matrix
UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );
// 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
\endcode
// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, storing also
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:
\code
// Recommendation 1: use dense matrices for small diagonal matrices
using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >;
// Recommendation 2: use sparse matrices for large diagonal matrices
using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >;
\endcode
// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<int> > A( 4 );
A *= 2; // Compilation error; Scale operation is not available on an unilower matrix
A /= 2; // Compilation error; Scale operation is not available on an unilower matrix
A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix
A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix
A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix
\endcode
// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A lower and upper triangular matrix can participate in numerical operations in any way any other
// dense or sparse matrix can participate. It can also be combined with any other dense or sparse
// vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix
// within arithmetic operations:
\code
using blaze::LowerMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a triangular matrix. In case the
// matrix to be assigned does not satisfy the invariants of the triangular matrix at compile
// time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular
// and strictly triangular matrix types can be used in the same way, but may pose some additional
// restrictions (see the according class documentations).
//
//
// \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices
// <hr>
//
// It is also possible to use triangular block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
// Definition of a 5x5 lower block matrix based on DynamicMatrix
LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Definition of a 7x7 upper block matrix based on CompressedMatrix
UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:
\code
const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } };
A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception
B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception
\endcode
// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::UniLowerMatrix;
using blaze::UniUpperMatrix;
// Compilation error: lower unitriangular matrices are restricted to numeric element types
UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Compilation error: upper unitriangular matrices are restricted to numeric element types
UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of a general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead. The following examples
// demonstrate several common situations where triangular matrices can positively or negatively
// impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
DynamicVector<double,columnVector> x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
LowerMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the lower matrix; no performance penalty
C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead
C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead
\endcode
// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime
// overhead it is therefore generally advisable to assign lower or upper triangular matrices to
// other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:
\code
LowerMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a lower matrix; no runtime overhead
C = A - B; // Results in a lower matrix; no runtime overhead
C = A * B; // Results in a lower matrix; no runtime overhead
\endcode
\code
UpperMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an upper matrix; no runtime overhead
C = A - B; // Results in an upper matrix; no runtime overhead
C = A * B; // Results in an upper matrix; no runtime overhead
\endcode
// \n Previous: \ref adaptors_hermitian_matrices Next: \ref views
*/
//*************************************************************************************************
//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row, column, or band of a matrix. As such, views act as a reference to specific elements of
// a vector or matrix. This reference is valid and can be used in every way as any other vector
// or matrix can be used as long as the referenced vector or matrix is not resized or entirely
// destroyed. Views also act as an alias to the elements of the vector or matrix: Changes made to
// the elements (e.g. modifying values, inserting or erasing elements) via the view are immediately
// visible in the vector or matrix and changes made via the vector or matrix are immediately
// visible in the view.
//
// It is also possible to create nested views (compound views), such as for instance bands of
// submatrices or row selections on column selections. A compound view also acts as reference
// to specific elements of the underlying vector or matrix and is valid as long as the underlying,
// referenced vector or matrix is not resized or entirely destroyed.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
// - \ref views_subvectors
// - \ref views_element_selections
//
// Matrix views:
// - \ref views_submatrices
// - \ref views_rows
// - \ref views_row_selections
// - \ref views_columns
// - \ref views_column_selections
// - \ref views_bands
//
//
// \n \section views_examples Examples
\code
using blaze::DynamicMatrix;
using blaze::StaticVector;
// Setup of the 3x5 row-major matrix
DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 },
{ 0, 2, 5, -1, -1 },
{ 1, 0, 0, 2, 1 } };
// Setup of the 2-dimensional row vector
StaticVector<int,2UL,rowVector> vec{ 18, 19 };
// Assigning to the elements (1,2) and (1,3) via a subvector of a row
//
// ( 1 0 -2 3 0 )
// ( 0 2 18 19 -1 )
// ( 1 0 0 2 1 )
//
subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
// Switching rows 0 and 2 of A
//
// ( 1 0 0 2 1 )
// ( 0 2 18 19 -1 )
// ( 1 0 -2 3 0 )
//
rows<0,2>( A ) = rows<2,0>( A );
// Warning: It is the programmer's responsibility to ensure the view does not outlive
// the viewed vector or matrix (dangling reference)!
auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } );
\endcode
// \n Previous: \ref adaptors_triangular_matrices Next: \ref views_subvectors
*/
//*************************************************************************************************
//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g. modifying values,
// inserting or erasing elements) are immediately visible in the vector and changes made via the
// vector are immediately visible in the subvector.
//
//
// \n \section views_subvectors_setup Setup of Subvectors
// <hr>
//
// A view on a dense or sparse subvector can be created very conveniently via the \c subvector()
// function. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Subvector.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The first parameter specifies the offset of the subvector within the underlying dense or sparse
// vector, the second parameter specifies the size of the subvector. The two parameters can be
// specified either at compile time or at runtime:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments)
auto sv1 = subvector<4UL,12UL>( x );
// Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments)
auto sv2 = subvector( x, 8UL, 16UL );
\endcode
// The \c subvector() function returns an expression representing the subvector view. The type of
// this expression depends on the given subvector arguments, primarily the type of the vector and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using VectorType = blaze::DynamicVector<int>;
using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A subvector created
// from a row vector can be used as any other row vector, a subvector created from a column vector
// can be used as any other column vector. The view can also be used on both sides of an assignment:
// The subvector can either be used as an alias to grant write access to a specific subvector of a
// vector primitive on the left-hand side of an assignment or to grant read-access to a specific
// subvector of a vector primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9])
auto sv = subvector( x, 0UL, 10UL );
// Setting the first ten elements of x to the 2nd row of matrix A
sv = row( A, 2UL );
// Setting the second ten elements of x to y
subvector( x, 10UL, 10UL ) = y;
// Setting the 3rd row of A to a subvector of x
row( A, 3UL ) = subvector( x, 3UL, 10UL );
// Setting x to a subvector of the result of the addition between y and the 1st row of A
x = subvector( y + row( A, 1UL ), 2UL, 5UL );
\endcode
// \warning It is the programmer's responsibility to ensure the subvector does not outlive the
// viewed vector:
\code
// Creating a subvector on a temporary vector; results in a dangling reference!
auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } );
\endcode
// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
auto sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
// The numbering of the subvector elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// subvectors an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // OK: Write access to the dense subvector value.
... = *it; // OK: Read access to the dense subvector value.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense subvector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v
// The subscript operator provides access to all possible elements of the sparse subvector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse subvector, the element is inserted into the
// subvector.
sv[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the subvector it is inserted into the subvector, if it is already contained
// in the subvector its value is modified.
sv.set( 45UL, -1.2 );
// An alternative for inserting elements into the subvector is the insert() function. However,
// it inserts the element only in case the element is not already contained in the subvector.
sv.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In
// case of subvectors, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the subvector and that the subvector's
// capacity is large enough to hold the new element. Note however that due to the nature of
// a subvector, which may be an alias to the middle of a sparse vector, the append() function
// does not work as efficiently for a subvector as it does for a vector.
sv.reserve( 10UL );
sv.append( 51UL, -2.1 );
\endcode
// \n \section views_subvectors_common_operations Common Operations
// <hr>
//
// A subvector view can be used like any other dense or sparse vector. This means that with
// only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used.
// For instance, the current number of elements can be obtained via the \c size() function, the
// current capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since subvectors are references to a specific range of a
// vector, several operations are not possible, such as resizing and swapping. The following
// example shows this by means of a dense subvector view:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Creating a view on the range [5..15] of vector v
auto sv = subvector( v, 5UL, 10UL );
sv.size(); // Returns the number of elements in the subvector
sv.capacity(); // Returns the capacity of the subvector
sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector
sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector
auto sv2 = subvector( v, 15UL, 10UL );
swap( sv, sv2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1
sv = d2; // Dense vector initialization of the range [0..9]
subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19]
d3 = sv + d2; // Dense vector/dense vector addition
s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition
d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication
subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6]
d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9]
d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9]
subvector( d1, 0UL , 10UL ) += d2; // Addition assignment
subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment
subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment
double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors
\endcode
// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Identical creations of an unaligned subvector in the range [8..23]
auto sv1 = subvector ( x, 8UL, 16UL );
auto sv2 = subvector<unaligned>( x, 8UL, 16UL );
auto sv3 = subvector<8UL,16UL> ( x );
auto sv4 = subvector<unaligned,8UL,16UL>( x );
\endcode
// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide
// full flexibility in the creation of subvectors, this might result in performance disadvantages
// in comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector. Aligned subvectors are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of the subvector must be aligned. The following source code gives some examples
// for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double
// values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicVector<double,blaze::columnVector> d( 17UL );
// ... Resizing and initialization
// OK: Starts at the beginning, i.e. the first element is aligned
auto dsv1 = subvector<aligned>( d, 0UL, 13UL );
// OK: Start index is a multiple of 4, i.e. the first element is aligned
auto dsv2 = subvector<aligned>( d, 4UL, 7UL );
// OK: The start index is a multiple of 4 and the subvector includes the last element
auto dsv3 = subvector<aligned>( d, 8UL, 9UL );
// Error: Start index is not a multiple of 4, i.e. the first element is not aligned
auto dsv4 = subvector<aligned>( d, 5UL, 8UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense subvectors.
// In contrast, aligned sparse subvectors at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned subvector is created:
\code
using blaze::aligned;
blaze::CompressedVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// \n Previous: \ref views Next: \ref views_element_selections
*/
//*************************************************************************************************
//**Element Selections*****************************************************************************
/*!\page views_element_selections Element Selections
//
// \tableofcontents
//
//
// Element selections provide views on arbitrary compositions of elements of dense and sparse
// vectors. These views act as a reference to the selected elements and represent them as another
// dense or sparse vector. This reference is valid and can be used in every way any other dense
// or sparse vector can be used as long as the vector containing the elements is not resized or
// entirely destroyed. The element selection also acts as an alias to the vector elements in the
// specified range: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the vector and changes made via the vector are immediately
// visible in the elements.
//
//
// \n \section views_element_selections_setup Setup of Element Selections
//
// An element selection can be created very conveniently via the \c elements() function. It can
// be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Elements.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The indices of the elements to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Selecting the elements 4, 6, 8, and 10 (compile time arguments)
auto e1 = elements<4UL,6UL,8UL,10UL>( x );
// Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto e2 = elements( x, { 3UL, 2UL, 1UL } );
auto e3 = elements( x, list );
// Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
   const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto e4 = elements( x, array );
auto e5 = elements( x, array.data(), array.size() );
   // Selecting the element 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto e6 = elements( x, vector );
auto e7 = elements( x, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the elements of the underlying vector in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicVector<double,blaze::rowVector> x{ 0, 1, 2, 3, 4, 5, 6, 7, 8 };
// Selecting all even elements of the vector, i.e. selecting (0,2,4,6,8)
auto e1 = elements( x, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd elements of the vector, i.e. selecting (1,3,5,7)
auto e2 = elements( x, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the elements of the vector, i.e. selecting (8,7,6,5,4,3,2,1,0)
   auto e3 = elements( x, [max=x.size()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c elements() function returns an expression representing the view on the selected elements.
// The type of this expression depends on the given arguments, primarily the type of the vector and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using VectorType = blaze::DynamicVector<int>;
using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. An element selection
// created from a row vector can be used as any other row vector, an element selection created
// from a column vector can be used as any other column vector. The view can also be used on both
// sides of an assignment: It can either be used as an alias to grant write access to specific
// elements of a vector primitive on the left-hand side of an assignment or to grant read-access
// to specific elements of a vector primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the elements 1, 3, 5, and 7
auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } );
// Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A
e = row( A, 2UL );
// Setting the elements 2, 4, 6, and 8 of x to y
elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y;
// Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x
row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between y and the 1st row of A
   x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } );
\endcode
// Please note that using an element selection, which refers to an index multiple times, on the
// left-hand side of an assignment leads to undefined behavior:
\code
blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 };
blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 };
auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times
e = b; // Undefined behavior
\endcode
// In this example both vectors have the same size, which results in a correct vector assignment,
// but the final value of the element at index 1 is unspecified.
//
// \warning It is the programmer's responsibility to ensure the element selection does not outlive
// the viewed vector:
\code
// Creating an element selection on a temporary vector; results in a dangling reference!
auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } );
\endcode
// \n \section views_element_selections_element_access Element Access
//
// The elements of an element selection can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Selecting the elements 2, 4, 6, and 8
auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } );
// Setting the 1st element of the element selection, which corresponds to
// the element at index 4 in vector v
e[1] = 2.0;
\endcode
// The numbering of the selected elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of selected elements. Alternatively, the elements of an element selection
// can be traversed via iterators. Just as with vectors, in case of non-const element selections,
// \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of
// constant element selections an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of dense vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
*it = ...; // OK: Write access to the dense vector value.
... = *it; // OK: Read access to the dense vector value.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense vector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of sparse vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_element_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse element selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
std::vector<size_t> indices;
// ... Selecting indices of the sparse vector
auto e = elements( v, indices );
// The subscript operator provides access to the selected elements of the sparse vector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse vector, the element is inserted.
e[42] = 2.0;
// The second operation for inserting elements via the element selection is the set() function.
// In case the element is not contained in the vector it is inserted into the vector, if it is
// already contained in the vector its value is modified.
e.set( 45UL, -1.2 );
// An alternative for inserting elements into the vector is the insert() function. However, it
// inserts the element only in case the element is not already contained in the vector.
e.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In case
// of element selections, append() also requires that the appended element's index is strictly
   // larger than the currently largest non-zero index of the selection and that the selection's
// capacity is large enough to hold the new element. Note however that due to the nature of an
// element selection, which is an alias to arbitrary elements of a sparse vector, the append()
// function does not work as efficiently for an element selection as it does for a vector.
e.reserve( 10UL );
e.append( 51UL, -2.1 );
\endcode
// \n \section views_element_selections_common_operations Common Operations
//
// An element selection can be used like any other dense or sparse vector. For instance, the
// number of selected elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since element selections are references to a specific range of a vector,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of an element selection on a dense vector:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Selecting the elements 5 and 10
auto e = elements( v, { 5UL, 10UL } );
e.size(); // Returns the number of elements in the element selection
e.capacity(); // Returns the capacity of the element selection
e.nonZeros(); // Returns the number of non-zero elements contained in the element selection
e.resize( 84UL ); // Compilation error: Cannot resize an element selection
auto e2 = elements( v, { 15UL, 10UL } );
swap( e, e2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_element_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse element selections can be used in all arithmetic operations that any other
// dense or sparse vector can be used in. The following example gives an impression of the use of
// dense element selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse
// element selections with fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
   auto e( elements( d1, indices1 ) );  // Selecting every third element of d1 in the range [0..21]
e = d2; // Dense vector assignment to the selected elements
elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements
d3 = e + d2; // Dense vector/dense vector addition
s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition
d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication
elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements
d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements
d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements
elements( d1, indices1 ) += d2; // Addition assignment
elements( d1, indices2 ) -= s2; // Subtraction assignment
elements( d1, indices3 ) *= e; // Multiplication assignment
double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_subvectors Next: \ref views_submatrices
*/
//*************************************************************************************************
//**Submatrices************************************************************************************
/*!\page views_submatrices Submatrices
//
// \tableofcontents
//
//
// Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors
// provide views on specific parts of vectors. As such, submatrices act as a reference to a
// specific block within a matrix. This reference is valid and can be used in every way any
// other dense or sparse matrix can be used as long as the matrix containing the submatrix is
// not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements
// in the specified block: Changes made to the elements (e.g. modifying values, inserting or
// erasing elements) are immediately visible in the matrix and changes made via the matrix are
// immediately visible in the submatrix.
//
//
// \n \section views_submatrices_setup Setup of Submatrices
// <hr>
//
// A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix()
// function. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Submatrix.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The first and second parameter specify the row and column of the first element of the submatrix.
// The third and fourth parameter specify the number of rows and columns, respectively. The four
// parameters can be specified either at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments)
auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A );
// Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments)
auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL );
\endcode
// The \c submatrix() function returns an expression representing the submatrix view. The type of
// this expression depends on the given submatrix arguments, primarily the type of the matrix and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from
// a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major
// matrix will be a column-major matrix. The view can also be used on both sides of an assignment:
// The submatrix can either be used as an alias to grant write access to a specific submatrix
// of a matrix primitive on the left-hand side of an assignment or to grant read-access to
// a specific submatrix of a matrix primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Creating a dense submatrix of size 8x4, starting in row 0 and column 2
auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL );
// Setting the submatrix of A to a 8x4 submatrix of B
sm = submatrix( B, 0UL, 0UL, 8UL, 4UL );
// Copying the sparse matrix C into another 8x4 submatrix of A
submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C;
// Assigning part of the result of a matrix addition to the first submatrix
sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL );
\endcode
// \warning It is the programmer's responsibility to ensure the submatrix does not outlive the
// viewed matrix:
\code
// Creating a submatrix on a temporary matrix; results in a dangling reference!
auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_submatrices_element_access Element Access
// <hr>
//
// The elements of a submatrix can be directly accessed with the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a 8x8 submatrix, starting from position (4,4)
auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
// Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
// with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant submatrices an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
*it = ...; // OK: Write access to the dense submatrix value.
... = *it; // OK: Read access to the dense submatrix value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense submatrix value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_submatrices_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A
// The function call operator provides access to all possible elements of the sparse submatrix,
// including the zero elements. In case the function call operator is used to access an element
// that is currently not stored in the sparse submatrix, the element is inserted into the
// submatrix.
sm(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the submatrix it is inserted into the submatrix, if it is already contained
// in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the submatrix is the insert() function. However,
// it inserts the element only in case the element is not already contained in the submatrix.
sm.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of submatrices, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// or column of the submatrix and that the according row's or column's capacity is large
// enough to hold the new element. Note however that due to the nature of a submatrix, which
// may be an alias to the middle of a sparse matrix, the append() function does not work as
// efficiently for a submatrix as it does for a matrix.
sm.reserve( 2UL, 10UL );
sm.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_submatrices_common_operations Common Operations
// <hr>
//
// A submatrix view can be used like any other dense or sparse matrix. This means that with only
// a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// submatrices are views on a specific submatrix of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on an 8x12 submatrix of matrix A
auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL );
sm.rows(); // Returns the number of rows of the submatrix
sm.columns(); // Returns the number of columns of the submatrix
sm.capacity(); // Returns the capacity of the submatrix
sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix
sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix
auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL );
swap( sm, sm2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_submatrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse submatrices can be used in all arithmetic operations that any other dense
// or sparse matrix can be used in. The following example gives an impression of the use of dense
// submatrices within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse matrices with
// fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1
// starting from row 0 and column 0
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix
// starting in row 0 and column 8
sm = S1; // Sparse matrix initialization of the second 8x8 submatrix
D3 = sm + D2; // Dense matrix/dense matrix addition
S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction
D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1
D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1
D2 = 2.0 * sm; // Scaling of a submatrix of D1
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment
submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment
a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_aligned_submatrices Aligned Submatrices
// <hr>
//
// Usually submatrices can be defined anywhere within a matrix. They may start at any position and
// may have an arbitrary extension (only restricted by the extension of the underlying matrix).
// However, in contrast to matrices themselves, which are always properly aligned in memory and
// therefore can provide maximum performance, this means that submatrices in general have to be
// considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0
auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A );
auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A );
\endcode
// All of these calls to the \c submatrix() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide
// full flexibility in the creation of submatrices, this might result in performance disadvantages
// in comparison to matrix primitives (even in case the specified submatrix could be aligned).
// Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a matrix might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned submatrices. Aligned submatrices are identical to
// unaligned submatrices in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying matrix. Aligned submatrices are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of each row/column of the submatrix must be aligned. The following source code
// gives some examples for a double precision row-major dynamic matrix, assuming that padding is
// enabled and that AVX is available, which packs 4 \c double values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL );
// ... Resizing and initialization
// OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding)
auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL );
// OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding)
auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL );
// OK: First column is a multiple of 4 and the submatrix includes the last row and column
auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL );
// Error: First column is not a multiple of 4, i.e. the first element is not aligned
auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense submatrices.
// In contrast, aligned sparse submatrices at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned submatrix is created:
\code
using blaze::aligned;
blaze::CompressedMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices
//
// Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of a 16x16 symmetric matrix
SymmetricMatrix< DynamicMatrix<int> > A( 16UL );
// Creating a dense submatrix of size 8x12, starting in row 2 and column 4
auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL );
\endcode
// It is important to note, however, that (compound) assignments to such submatrices have a
// special restriction: The symmetry of the underlying symmetric matrix must not be broken!
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n Previous: \ref views_element_selections Next: \ref views_rows
*/
//*************************************************************************************************
//**Rows*******************************************************************************************
/*!\page views_rows Rows
//
// \tableofcontents
//
//
// Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a
// reference to a specific row. This reference is valid and can be used in every way any other
// row vector can be used as long as the matrix containing the row is not resized or entirely
// destroyed. The row also acts as an alias to the row elements: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix
// and changes made via the matrix are immediately visible in the row.
//
//
// \n \section views_rows_setup Setup of Rows
// <hr>
//
// \image html row.png
// \image latex row.eps "Row view" width=250pt
//
// A reference to a dense or sparse row can be created very conveniently via the \c row() function.
// It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Row.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows
// of the matrix, and can be specified both at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st row of matrix A (compile time index)
auto row1 = row<1UL>( A );
// Creating a reference to the 2nd row of matrix A (runtime index)
auto row2 = row( A, 2UL );
\endcode
// The \c row() function returns an expression representing the row view. The type of this
// expression depends on the given row arguments, primarily the type of the matrix and the compile
// time arguments. If the type is required, it can be determined via the \c decltype specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. The reference can also be used on
// both sides of an assignment: The row can either be used as an alias to grant write access to a
// specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific row of a matrix primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd row of matrix A to x
auto row2 = row( A, 2UL );
row2 = x;
// Setting the 3rd row of matrix B to y
row( B, 3UL ) = y;
// Setting x to the 4th row of the result of the matrix multiplication
x = row( A * B, 4UL );
// Setting y to the 2nd row of the result of the sparse matrix multiplication
y = row( C * D, 2UL );
\endcode
// \warning It is the programmer's responsibility to ensure the row does not outlive the viewed
// matrix:
\code
// Creating a row on a temporary matrix; results in a dangling reference!
auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_rows_element_access Element Access
// <hr>
//
// The elements of a row can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th row of matrix A
auto row4 = row( A, 4UL );
// Setting the 1st element of the dense row, which corresponds
// to the 1st element in the 4th row of matrix A
row4[1] = 2.0;
\endcode
// The numbering of the row elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of columns of the referenced matrix. Alternatively, the elements of a
// row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// rows an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // OK: Write access to the dense row value.
... = *it; // OK: Read access to the dense row value.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense row value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_rows_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse row can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
// The subscript operator provides access to all possible elements of the sparse row,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse row, the element is inserted into the row.
row0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the row it is inserted into the row, if it is already contained in
// the row its value is modified.
row0.set( 45UL, -1.2 );
// An alternative for inserting elements into the row is the insert() function. However,
// it inserts the element only in case the element is not already contained in the row.
row0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse row is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the row and that the row's capacity is large
// enough to hold the new element.
row0.reserve( 10UL );
row0.append( 51UL, -2.1 );
\endcode
// \n \section views_rows_common_operations Common Operations
// <hr>
//
// A row view can be used like any other row vector. This means that with only a few exceptions
// all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the
// current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since rows are references to specific rows of a matrix, several operations
// are not possible on views, such as resizing and swapping. The following example shows this by
// means of a dense row view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd row of matrix A
auto row2 = row( A, 2UL );
row2.size(); // Returns the number of elements in the row
row2.capacity(); // Returns the capacity of the row
row2.nonZeros(); // Returns the number of non-zero elements contained in the row
row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix
auto row3 = row( A, 3UL );
swap( row2, row3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_rows_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse rows can be used in all arithmetic operations that any other dense or
// sparse row vector can be used in. The following example gives an impression of the use of
// dense rows within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse rows with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::rowVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
row0[0] = 0.0; // Manual initialization of the 0th row of A
row0[1] = 0.0;
row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A
row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A
row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A
b = row0 + a; // Dense vector/dense vector addition
b = c + row( A, 1UL ); // Sparse vector/dense vector addition
b = row0 * row( A, 2UL ); // Component-wise vector multiplication
row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row
b = row( A, 1UL ) * 2.0; // Scaling of the 1st row
b = 2.0 * row( A, 1UL ); // Scaling of the 1st row
row( A, 2UL ) += a; // Addition assignment
row( A, 2UL ) -= c; // Subtraction assignment
row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment
double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors
A = trans( c ) * row( A, 1UL ); // Outer product between two vectors
\endcode
// \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that row views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st row of a column-major matrix A
auto row1 = row( A, 1UL );
for( auto it=row1.begin(); it!=row1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a row view on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row view on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th row of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th row of the column-major matrix A with B.
blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B;
\endcode
// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible
// using a row-major storage order for matrix \c A, would result in a more efficient evaluation.
//
// \n Previous: \ref views_submatrices Next: \ref views_row_selections
*/
//*************************************************************************************************
//**Row Selections*********************************************************************************
/*!\page views_row_selections Row Selections
//
// \tableofcontents
//
//
// Row selections provide views on arbitrary compositions of rows of dense and sparse matrices.
// These views act as a reference to the selected rows and represent them as another dense or
// sparse matrix. This reference is valid and can be used in every way any other dense or sparse
// matrix can be used as long as the matrix containing the rows is not resized or entirely
// destroyed. The row selection also acts as an alias to the matrix elements in the specified
// range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are
// immediately visible in the matrix and changes made via the matrix are immediately visible
// in the rows.
//
//
// \n \section views_row_selections_setup Setup of Row Selections
//
// A row selection can be created very conveniently via the \c rows() function. It can be included
// via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Rows.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The indices of the rows to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the rows 4, 6, 8, and 10 (compile time arguments)
auto rs1 = rows<4UL,6UL,8UL,10UL>( A );
// Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto rs2 = rows( A, { 3UL, 2UL, 1UL } );
auto rs3 = rows( A, list );
// Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto rs4 = rows( A, array );
auto rs5 = rows( A, array.data(), array.size() );
// Selecting the row 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto rs6 = rows( A, vector );
auto rs7 = rows( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the rows of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A( 9UL, 18UL );
// Selecting all even rows of the matrix, i.e. selecting the rows 0, 2, 4, 6, and 8
auto rs1 = rows( A, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd rows of the matrix, i.e. selecting the rows 1, 3, 5, and 7
auto rs2 = rows( A, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the rows of the matrix, i.e. selecting the rows 8, 7, 6, 5, 4, 3, 2, 1, and 0
auto rs3 = rows( A, [max=A.rows()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c rows() function returns an expression representing the view on the selected rows. The
// type of this expression depends on the given arguments, primarily the type of the matrix and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// row selection will always be treated as a row-major matrix, regardless of the storage order of
// the matrix containing the rows. The view can also be used on both sides of an assignment: It
// can either be used as an alias to grant write access to specific rows of a matrix primitive
// on the left-hand side of an assignment or to grant read-access to specific rows of a matrix
// primitive or expression on the right-hand side of an assignment. The following example
// demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
blaze::DynamicMatrix<double,blaze::columnMajor> B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Selecting the rows 1, 3, 5, and 7 of A
auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting rows 1, 3, 5, and 7 of A to row 4 of B
rs = rows( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the rows 2, 4, 6, and 8 of A to C
rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C
B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \warning It is the programmer's responsibility to ensure the row selection does not outlive the
// viewed matrix:
\code
// Creating a row selection on a temporary matrix; results in a dangling reference!
auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_row_selections_element_access Element Access
//
// The elements of a row selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the first four rows of A in reverse order
auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the row selection, which corresponds
// to the element at position (3,0) in matrix A
rs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as
// with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant row selection an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_row_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse row selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse row
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse row selection, the element
// is inserted into the row selection.
rs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the row selection it is inserted into the row selection, if it is already
// contained in the row selection its value is modified.
rs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the row selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// row selection.
rs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of row selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// of the row selection and that the according row's capacity is large enough to hold the new
// element. Note however that due to the nature of a row selection, which may be an alias to
// an arbitrary collection of rows, the append() function does not work as efficiently for
// a row selection as it does for a matrix.
rs.reserve( 2UL, 10UL );
rs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_row_selections_common_operations Common Operations
//
// A view on specific rows of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// row selections are views on specific rows of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the rows 8, 16, 24, and 32 of matrix A
auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } );
rs.rows(); // Returns the number of rows of the row selection
rs.columns(); // Returns the number of columns of the row selection
rs.capacity(); // Returns the capacity of the row selection
rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection
rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection
   auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } );
swap( rs, rs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_row_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse row selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use
// of dense row selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
   auto rs = rows( D1, indices1 );  // Selecting every third row of D1 in the range [0..21]
rs = D2; // Dense matrix assignment to the selected rows
rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows
D3 = rs + D2; // Dense matrix/dense matrix addition
S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows
D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows
D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows
rows( D1, indices1 ) += D2; // Addition assignment
rows( D1, indices2 ) -= S1; // Subtraction assignment
rows( D1, indices3 ) %= rs; // Schur product assignment
a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices
//
// Especially noteworthy is that row selections can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd row of a column-major matrix A
auto rs = rows( A, { 1UL, 3UL } );
// Traversing row 0 of the selection, which corresponds to the 1st row of matrix A
for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a row selection on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row selection on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix elements.
// Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th row of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// the 15th, 30th, and 45th row of the column-major matrix A with B.
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B;
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as
// possible, using a row-major storage order for matrix \c A would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_rows Next: \ref views_columns
*/
//*************************************************************************************************
//**Columns****************************************************************************************
/*!\page views_columns Columns
//
// \tableofcontents
//
//
// Just as rows provide a view on a specific row of a matrix, columns provide views on a specific
// column of a dense or sparse matrix. As such, columns act as a reference to a specific column.
// This reference is valid and can be used in every way any other column vector can be used as long
// as the matrix containing the column is not resized or entirely destroyed. Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the
// matrix and changes made via the matrix are immediately visible in the column.
//
//
// \n \section views_colums_setup Setup of Columns
// <hr>
//
// \image html column.png
// \image latex column.eps "Column view" width=250pt
//
// A reference to a dense or sparse column can be created very conveniently via the \c column()
// function. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Column.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of
// columns of the matrix, and can be specified both at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st column of matrix A (compile time index)
auto col1 = column<1UL>( A );
// Creating a reference to the 2nd column of matrix A (runtime index)
auto col2 = column( A, 2UL );
\endcode
// The \c column() function returns an expression representing the column view. The type of this
// expression depends on the given column arguments, primarily the type of the matrix and the
// compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other column vector, i.e. it can be assigned to, it
// can be copied from, and it can be used in arithmetic operations. The reference can also be used
// on both sides of an assignment: The column can either be used as an alias to grant write access
// to a specific column of a matrix primitive on the left-hand side of an assignment or to grant
// read-access to a specific column of a matrix primitive or expression on the right-hand side
// of an assignment. The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::columnVector> x;
blaze::CompressedVector<double,blaze::columnVector> y;
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::columnMajor> C, D;
// ... Resizing and initialization
// Setting the 1st column of matrix A to x
auto col1 = column( A, 1UL );
col1 = x;
// Setting the 4th column of matrix B to y
column( B, 4UL ) = y;
// Setting x to the 2nd column of the result of the matrix multiplication
x = column( A * B, 2UL );
// Setting y to the 2nd column of the result of the sparse matrix multiplication
y = column( C * D, 2UL );
\endcode
// \warning It is the programmer's responsibility to ensure the column does not outlive the
// viewed matrix:
\code
// Creating a column on a temporary matrix; results in a dangling reference!
auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_columns_element_access Element Access
// <hr>
//
// The elements of a column can be directly accessed with the subscript operator.
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th column of matrix A
auto col4 = column( A, 4UL );
// Setting the 1st element of the dense column, which corresponds
// to the 1st element in the 4th column of matrix A
col4[1] = 2.0;
\endcode
// The numbering of the column elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of rows of the referenced matrix. Alternatively, the elements of a column
// can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// columns an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
*it = ...; // OK; Write access to the dense column value
... = *it; // OK: Read access to the dense column value.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense column value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_columns_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse column can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
// The subscript operator provides access to all possible elements of the sparse column,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse column, the element is inserted into the column.
col0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the column it is inserted into the column, if it is already contained
// in the column its value is modified.
col0.set( 45UL, -1.2 );
// An alternative for inserting elements into the column is the insert() function. However,
// it inserts the element only in case the element is not already contained in the column.
col0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse column is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the column and that the column's capacity is
// large enough to hold the new element.
col0.reserve( 10UL );
col0.append( 51UL, -2.1 );
\endcode
// \n \section views_columns_common_operations Common Operations
// <hr>
//
// A column view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since columns are references to specific columns of a matrix, several
// operations are not possible on views, such as resizing and swapping. The following example
// shows this by means of a dense column view:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd column of matrix A
auto col2 = column( A, 2UL );
col2.size(); // Returns the number of elements in the column
col2.capacity(); // Returns the capacity of the column
col2.nonZeros(); // Returns the number of non-zero elements contained in the column
col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix
auto col3 = column( A, 3UL );
swap( col2, col3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_columns_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse columns can be used in all arithmetic operations that any other dense or
// sparse column vector can be used in. The following example gives an impression of the use of
// dense columns within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse columns with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
col0[0] = 0.0; // Manual initialization of the 0th column of A
col0[1] = 0.0;
column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A
column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A
column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A
b = col0 + a; // Dense vector/dense vector addition
b = c + column( A, 1UL ); // Sparse vector/dense vector addition
b = col0 * column( A, 2UL ); // Component-wise vector multiplication
column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column
b = column( A, 1UL ) * 2.0; // Scaling of the 1st column
b = 2.0 * column( A, 1UL ); // Scaling of the 1st column
column( A, 2UL ) += a; // Addition assignment
column( A, 2UL ) -= c; // Subtraction assignment
column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment
double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors
A = column( A, 1UL ) * trans( c ); // Outer product between two vectors
\endcode
// \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that column views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
   // Creating a reference to the 1st column of the row-major matrix A
auto col1 = column( A, 1UL );
for( auto it=col1.begin(); it!=col1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a column view on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column view on a matrix
// with column-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th column of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th column of the row-major matrix B.
blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL );
\endcode
// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as
// possible, using a column-major storage order for matrix \c B would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_row_selections Next: \ref views_column_selections
*/
//*************************************************************************************************
//**Column Selections******************************************************************************
/*!\page views_column_selections Column Selections
//
// \tableofcontents
//
//
// Column selections provide views on arbitrary compositions of columns of dense and sparse
// matrices. These views act as a reference to the selected columns and represent them as another
// dense or sparse matrix. This reference is valid and can be used in every way any other dense
// or sparse matrix can be used as long as the matrix containing the columns is not resized or
// entirely destroyed. The column selection also acts as an alias to the matrix elements in the
// specified range: Changes made to the columns (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the columns.
//
//
// \n \section views_column_selections_setup Setup of Column Selections
//
// A column selection can be created very conveniently via the \c columns() function. It can be
// included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Columns.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The indices of the columns to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Selecting the columns 4, 6, 8, and 10 (compile time arguments)
auto cs1 = columns<4UL,6UL,8UL,10UL>( A );
// Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto cs2 = columns( A, { 3UL, 2UL, 1UL } );
auto cs3 = columns( A, list );
// Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
   const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto cs4 = columns( A, array );
auto cs5 = columns( A, array.data(), array.size() );
   // Selecting the column 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto cs6 = columns( A, vector );
auto cs7 = columns( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the columns of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 18UL, 9UL );
// Selecting all even columns of the matrix, i.e. selecting the columns 0, 2, 4, 6, and 8
auto cs1 = columns( A, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd columns of the matrix, i.e. selecting the columns 1, 3, 5, and 7
auto cs2 = columns( A, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the columns of the matrix, i.e. selecting the columns 8, 7, 6, 5, 4, 3, 2, 1, and 0
auto cs3 = columns( A, [max=A.columns()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c columns() function returns an expression representing the view on the selected columns.
// The type of this expression depends on the given arguments, primarily the type of the matrix
// and the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// column selection will always be treated as a column-major matrix, regardless of the storage
// order of the matrix containing the columns. The view can also be used on both sides of an
// assignment: It can either be used as an alias to grant write access to specific columns of a
// matrix primitive on the left-hand side of an assignment or to grant read-access to specific
// columns of a matrix primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
blaze::DynamicMatrix<double,blaze::rowMajor> B;
blaze::CompressedMatrix<double,blaze::columnMajor> C;
// ... Resizing and initialization
// Selecting the columns 1, 3, 5, and 7 of A
auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting columns 1, 3, 5, and 7 of A to column 4 of B
cs = columns( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the columns 2, 4, 6, and 8 of A to C
columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C
B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \warning It is the programmer's responsibility to ensure the column selection does not outlive
// the viewed matrix:
\code
// Creating a column selection on a temporary matrix; results in a dangling reference!
auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_column_selections_element_access Element Access
//
// The elements of a column selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the first four columns of A in reverse order
auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the column selection, which corresponds
// to the element at position (0,3) in matrix A
cs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a column selection can be traversed via (const) iterators.
// Just as with matrices, in case of non-const column selection, \c begin() and \c end() return
// an iterator, which allows to manipulate the elements, in case of constant column selection an
// iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_column_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse column selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256
auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse column
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse column selection, the element
// is inserted into the column selection.
cs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the column selection it is inserted into the column selection, if it is
// already contained in the column selection its value is modified.
cs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the column selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// column selection.
cs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of column selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according column
// of the column selection and that the according column's capacity is large enough to hold the
// new element. Note however that due to the nature of a column selection, which may be an alias
// to an arbitrary collection of columns, the append() function does not work as efficiently
// for a column selection as it does for a matrix.
cs.reserve( 2UL, 10UL );
cs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_column_selections_common_operations Common Operations
//
// A view on specific columns of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// column selections are views on specific columns of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the columns 8, 16, 24, and 32 of matrix A
auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } );
cs.rows(); // Returns the number of rows of the column selection
cs.columns(); // Returns the number of columns of the column selection
cs.capacity(); // Returns the capacity of the column selection
cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection
cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection
auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } );
swap( cs, cs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_column_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse column selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use of
// dense column selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
auto cs = columns( D1, indices1 );  // Selecting every third column of D1 in the range [0..21]
cs = D2; // Dense matrix assignment to the selected columns
columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns
D3 = cs + D2; // Dense matrix/dense matrix addition
S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns
D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns
D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns
columns( D1, indices1 ) += D2; // Addition assignment
columns( D1, indices2 ) -= S1; // Subtraction assignment
columns( D1, indices3 ) %= cs; // Schur product assignment
a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix
//
// Especially noteworthy is that column selections can be created for both row-major and
// column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a
// row directly and the interface of a column-major matrix only allows to traverse a column, via
// views it is possible to traverse a row of a column-major matrix or a column of a row-major
// matrix. For instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd column of the row-major matrix A
auto cs = columns( A, { 1UL, 3UL } );
// Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th, 30th, and 45th column of the row-major matrix B.
blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a column-major storage order for matrix \c A would result in a more efficient evaluation.
//
// \n Previous: \ref views_columns Next: \ref views_bands
*/
//*************************************************************************************************
//**Bands******************************************************************************************
/*!\page views_bands Bands
//
// \tableofcontents
//
//
// Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the
// subdiagonal, ...). As such, bands act as a reference to a specific band. This reference
// is valid and can be used in every way any other vector can be used as long as the matrix
// containing the band is not resized or entirely destroyed. The band also acts as an alias to
// the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the band.
//
//
// \n \section views_bands_setup Setup of Bands
// <hr>
//
// \image html band.png
// \image latex band.eps "Band view" width=250pt
//
// A reference to a dense or sparse band can be created very conveniently via the \c band()
// function. It can be included via the header files
\code
#include <blaze/Blaze.h>
// or
#include <blaze/Math.h>
// or
#include <blaze/math/Band.h>
\endcode
// and forward declared via the header file
\code
#include <blaze/Forward.h>
\endcode
// The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the
// total number of rows and \c N is the total number of columns, and can be specified both at
// compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st lower band of matrix A (compile time index)
auto band1 = band<-1L>( A );
// Creating a reference to the 2nd upper band of matrix A (runtime index)
auto band2 = band( A, 2L );
\endcode
// In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view
// on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band()
// function with a compile time index of 0:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the diagonal of matrix A via the band() and diagonal() functions
auto diag1 = band<0L>( A );
auto diag2 = diagonal( A );
static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" );
\endcode
// Both the \c band() and the \c diagonal() function return an expression representing the band
// view. The type of this expression depends on the given arguments, primarily the type of the
// matrix and the compile time arguments. If the type is required, it can be determined via
// \c decltype specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) );
using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) );
\endcode
// This resulting view can be treated as any other vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. By default, bands are considered
// column vectors, but this setting can be changed via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch
// (see \ref transpose_flag). The reference can also be used on both sides of an assignment: The
// band can either be used as an alias to grant write access to a specific band of a matrix
// primitive on the left-hand side of an assignment or to grant read-access to a specific band of
// a matrix primitive or expression on the right-hand side of an assignment. The following example
// demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd upper band of matrix A to x
auto band2 = band( A, 2L );
band2 = x;
// Setting the 3rd upper band of matrix B to y
band( B, 3L ) = y;
// Setting x to the 2nd lower band of the result of the matrix multiplication
x = band( A * B, -2L );
// Setting y to the 2nd upper band of the result of the sparse matrix multiplication
y = band( C * D, 2L );
\endcode
// \warning It is the programmer's responsibility to ensure the band does not outlive the viewed
// matrix:
\code
// Creating a band on a temporary matrix; results in a dangling reference!
auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_bands_element_access Element Access
// <hr>
//
// The elements of a band can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th upper band of matrix A
auto band4 = band( A, 4L );
// Setting the 1st element of the dense band, which corresponds
// to the 1st element in the 4th upper band of matrix A
band4[1] = 2.0;
\endcode
// The numbering of the band elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of elements of the referenced band. Alternatively, the elements of a band
// can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and
// \c end() return an iterator, which allows to manipulate the elements, in case of constant bands
// an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th upper band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
   *it = ...; // OK: Write access to the dense band value.
... = *it; // OK: Read access to the dense band value.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense band value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_bands_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse band can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto diag( band( A, 0L ) ); // Reference to the diagonal of A
// The subscript operator provides access to all possible elements of the sparse band,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse band, the element is inserted into the band.
diag[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the band it is inserted into the band, if it is already contained in
// the band its value is modified.
diag.set( 45UL, -1.2 );
// An alternative for inserting elements into the band is the insert() function. However,
// it inserts the element only in case the element is not already contained in the band.
diag.insert( 50UL, 3.7 );
\endcode
// \n \section views_bands_common_operations Common Operations
// <hr>
//
// A band view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of band elements can be obtained via the \c size() function, the current
// capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since bands are references to specific bands of a matrix,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of a dense band view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd upper band of matrix A
auto band2 = band( A, 2L );
band2.size(); // Returns the number of elements in the band
band2.capacity(); // Returns the capacity of the band
band2.nonZeros(); // Returns the number of non-zero elements contained in the band
band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix
auto band3 = band( A, 3L );
swap( band2, band3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_bands_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse bands can be used in all arithmetic operations that any other dense or
// sparse vector can be used in. The following example gives an impression of the use of dense
// bands within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse bands with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A
auto diag ( band( A, 0L ) ); // Reference to the diagonal of A
band1[0] = 0.0; // Manual initialization of the 1st upper band of A
diag = 1.0; // Homogeneous initialization of the diagonal of A
band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A
band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A
b = diag + a; // Dense vector/dense vector addition
b = c + band( A, -1L ); // Sparse vector/dense vector addition
b = diag * band( A, -2L ); // Component-wise vector multiplication
band( A, -1L ) *= 2.0;       // In-place scaling of the 1st lower band
b = band( A, -1L ) * 2.0;    // Scaling of the 1st lower band
b = 2.0 * band( A, -1L );    // Scaling of the 1st lower band
band( A, -2L ) += a; // Addition assignment
band( A, -2L ) -= c; // Subtraction assignment
band( A, -2L ) *= band( A, 0L ); // Multiplication assignment
double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors
A = band( A, -1L ) * trans( c ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_column_selections Next: \ref arithmetic_operations
*/
//*************************************************************************************************
//**Arithmetic Operations**************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
// <li> \ref addition
// <ul>
// <li> \ref vector_vector_addition </li>
// <li> \ref matrix_matrix_addition </li>
// <li> \ref scalar_addition </li>
// </ul>
// </li>
// <li> \ref subtraction
// <ul>
// <li> \ref vector_vector_subtraction </li>
// <li> \ref matrix_matrix_subtraction </li>
// <li> \ref scalar_subtraction </li>
// </ul>
// </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// <li> \ref vector_kronecker_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// <li> \ref matrix_kronecker_product </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref views_bands Next: \ref addition
*/
//*************************************************************************************************
//**Addition***************************************************************************************
/*!\page addition Addition
//
// \n \section vector_vector_addition Vector/Vector Addition
// <hr>
//
// The addition of vectors is as intuitive as the addition of scalar values. For the addition of
// any two vectors the addition operator (i.e. \c operator+()) can be used. It even enables the
// addition of dense and sparse vectors:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 + v2; // Addition of a dense and a sparse column vector of different data type
v3 = add( v1, v2 ); // Alternative syntax
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to add vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 + v2; // Compilation error: Cannot add a column vector and a row vector
v1 + trans( v2 ); // OK: Addition of two column vectors
\endcode
// Also note that the addition of two vectors with the same element type is favorable due to
// possible vectorization of the operation:
\code
blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 + v2; // Vectorized addition of two double precision vectors
\endcode
// \n \section outer_sum Outer Sum
// <hr>
//
// The addition between a column vector and a row vector results in the outer sum of the two
// vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
// Results in the matrix
//
// ( 1 5 0 6 )
// A = ( 4 8 3 9 )
// ( -2 2 -3 3 )
//
blaze::StaticMatrix<int,3UL,4UL> M1 = v1 + v2; // Outer sum
blaze::StaticMatrix<int,3UL,4UL> M2 = add( v1, v2 ); // Alternative syntax
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) + v2;
\endcode
// \n \section matrix_matrix_addition Matrix/Matrix Addition
// <hr>
//
// For the addition of any two matrices the addition operator (i.e. \c operator+()) can be used.
// It even enables the addition of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::CompressedMatrix<size_t,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<float,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 + M2; // Addition of a sparse column-major and a dense row-major matrix of different data type
M3 = add( M1, M2 ); // Alternative syntax
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to add row-major and column-major matrices.
// Note however that in favor of performance the addition of two matrices with the same storage
// order is favorable. The same argument holds for the element type: In case two matrices with
// the same element type are added, the performance can be much higher due to vectorization of
// the operation.
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices
\endcode
// \n \section scalar_addition Scalar Addition
// <hr>
//
// For convenience it is also possible to add a scalar value to a dense vector or dense matrix,
// which has the same effect as adding a uniform vector or matrix. In \b Blaze it is possible to
// use all built-in/fundamental data types except bool as scalar values. Additionally, it is
// possible to use \c std::complex values with the same built-in data types as element type.
// Examples:
\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };
blaze::DynamicVector<int> v2 = v1 + 2; // Results in { 5, 4, 7, -2, 3, 8 }
blaze::CompressedVector<int> v3 = 3 + v1; // Results in { 6, 5, 8, -1, 4, 9 }
blaze::DynamicVector<int> v4 = add( v1, 2 ); // Alternative syntax
\endcode
\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 },
{ -4, 1, 6 } };
blaze::DynamicMatrix<int> M2 = M1 + 2; // Results in { { 5, 4, 7 }, { -2, 3, 8 } }
blaze::CompressedMatrix<int> M3 = 3 + M1; // Results in { { 6, 5, 8 }, { -1, 4, 9 } }
blaze::DynamicMatrix<int> M4 = add( M1, 2 ); // Alternative syntax
\endcode
// \n Previous: \ref arithmetic_operations Next: \ref subtraction
*/
//*************************************************************************************************
//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// \n \section vector_vector_subtraction Vector/Vector Subtraction
// <hr>
//
// The subtraction of vectors works exactly as intuitive as the addition, but with the subtraction
// operator (i.e. \c operator-()). It also enables the subtraction of dense and sparse vectors:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 - v2; // Subtraction of a dense and a sparse column vector of different data type
v3 = sub( v1, v2 ); // Alternative syntax
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 ); // OK: Subtraction of two column vectors
\endcode
// Also note that the subtraction of two vectors with the same element type is favorable due to
// possible vectorization of the operation:
\code
blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 - v2; // Vectorized subtraction of two double precision vectors
\endcode
// \n \section outer_difference Outer Difference
// <hr>
//
// The subtraction between a column vector and a row vector results in the outer difference of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
// Results in the matrix
//
// ( 3 -1 4 -2 )
// A = ( 6 2 7 1 )
// ( 0 -4 1 -5 )
//
blaze::StaticMatrix<int,3UL,4UL> M1 = v1 - v2;        // Outer difference
blaze::StaticMatrix<int,3UL,4UL> M2 = sub( v1, v2 );  // Alternative syntax
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) - v2;
\endcode
// \n \section matrix_matrix_subtraction Matrix/Matrix Subtraction
// <hr>
//
// For the subtraction of any two matrices the subtraction operator (i.e. \c operator-()) can be
// used. It even enables the subtraction of dense and sparse matrices:
\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type
M3 = sub( M1, M2 ); // Alternative syntax
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to subtract row-major and column-major
// matrices. Note however that in favor of performance the subtraction of two matrices with the
// same storage order is favorable. The same argument holds for the element type: In case two
// matrices with the same element type are subtracted, the performance can be much higher due
// to vectorization of the operation.
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices
\endcode
// \n \section scalar_subtraction Scalar Subtraction
// <hr>
//
// For convenience it is also possible to subtract a scalar value from a dense vector or dense
// matrix, which has the same effect as subtracting a uniform vector or matrix. In \b Blaze it is
// possible to use all built-in/fundamental data types except bool as scalar values. Additionally,
// it is possible to use \c std::complex values with the same built-in data types as element type.
// Examples:
\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };
blaze::DynamicVector<int> v2 = v1 - 2; // Results in { 1, 0, 3, -6, -1, 4 }
blaze::CompressedVector<int> v3 = 3 - v1; // Results in { 0, 1, -2, 7, 2, -3 }
blaze::DynamicVector<int> v4 = sub( v1, 2 ); // Alternative syntax
\endcode
\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 },
{ -4, 1, 6 } };
blaze::DynamicMatrix<int> M2 = M1 - 2; // Results in { { 1, 0, 3 }, { -6, -1, 4 } }
blaze::CompressedMatrix<int> M3 = 3 - M1; // Results in { { 0, 1, -2 }, { 7, 2, -3 } }
blaze::DynamicMatrix<int> M4 = sub( M1, 2 ); // Alternative syntax
\endcode
// \n Previous: \ref addition Next: \ref scalar_multiplication
*/
//*************************************************************************************************
//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of vector or a matrix with a scalar value.
// Alternatively it is also possible to divide a vector or a matrix by a scalar value. In \b Blaze
// it is possible to use all built-in/fundamental data types except bool as scalar values.
// Additionally, it is possible to use \c std::complex values with the same built-in data types
// as element type.
\code
blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };
blaze::DynamicVector<double> v2 = v1 * 1.2; // Scalar multiplication
blaze::CompressedVector<float> v3 = -0.3F * v1; // Scalar multiplication
blaze::DynamicVector<double> v4 = v1 / 1.2; // Scalar division
blaze::CompressedVector<float> v5 = 12.0F / v1; // Scalar division (only dense vectors)
blaze::DynamicVector<double> v6 = mult( v1, 1.2 ); // Alternative syntax
blaze::DynamicVector<double> v7 = div( v1, 1.2 ); // Alternative syntax
\endcode
\code
blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
blaze::DynamicMatrix<double> M2 = M1 * 1.2; // Scalar multiplication
blaze::CompressedMatrix<float> M3 = -0.3F * M1; // Scalar multiplication
blaze::DynamicMatrix<double> M4 = M1 / 1.2; // Scalar division
blaze::CompressedMatrix<float> M5 = 12.0F / M1; // Scalar division (only dense matrices)
blaze::DynamicMatrix<double> M6 = mult( M1, 1.2 ); // Alternative syntax
blaze::DynamicMatrix<double> M7 = div( M1, 1.2 ); // Alternative syntax
\endcode
// Vectors and matrices cannot be used as scalar values for scalar multiplications or divisions
// (see the following example). However, each vector and matrix provides the \c scale() function,
// which can be used to scale a vector or matrix element-wise with arbitrary scalar data types:
\code
blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
blaze::StaticMatrix<int,3UL,3UL> scalar;
M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication
M1.scale( scalar ); // Scalar multiplication
\endcode
// \n Previous: \ref subtraction Next: \ref componentwise_multiplication
*/
//*************************************************************************************************
//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::StaticVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and
// a dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row
// vectors. The result is a dense row vector.
CompressedVector<int,columnVector> v7( mult( v1, v2 ) ); // Alternative syntax
DynamicVector<double,rowVector> v8( mult( v3, v4 ) ); // Alternative syntax
\endcode
// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
int result1 = v1 * v2; // Results in the value 15
int result2 = mult( v1, v2 ); // Alternative syntax
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
int result = v1 * trans( v2 ); // Also results in the value 15
\endcode
// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
// All alternatives for the inner product between a column vector and a row vector
int result1 = trans( v1 ) * trans( v2 );
int result2 = inner( v1, v2 );
int result3 = dot( v1, v2 );
int result4 = (v1,v2);
\endcode
// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
// Results in the matrix
//
// ( -2 6 -4 8 )
// A = ( -5 15 -10 20 )
// ( 1 -3 2 -4 )
//
   blaze::StaticMatrix<int,3UL,4UL> M1 = v1 * v2;          // Outer product
   blaze::StaticMatrix<int,3UL,4UL> M2 = mult( v1, v2 );   // Alternative syntax
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) * v2;
\endcode
// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };
blaze::StaticMatrix<int,3UL,4UL> M1 = outer( v1, v2 ); // Outer product between two row vectors
\endcode
// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as
\f[
\left(\begin{array}{*{1}{c}}
c_0 \\
c_1 \\
c_2 \\
\end{array}\right)
=
\left(\begin{array}{*{1}{c}}
a_1 b_2 - a_2 b_1 \\
a_2 b_0 - a_0 b_2 \\
a_0 b_1 - a_1 b_0 \\
\end{array}\right).
\f]
// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
\endcode
// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
//
// \n \section vector_kronecker_product Kronecker Product
// <hr>
//
// The Kronecker product of two vectors with the same transpose flag can be computed via the
// \a kron() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<double> v1( 28UL );
CompressedVector<float> v2( 17UL );
// ... Initialization of the vectors
CompressedVector<double> v3 = kron( v1, v2 );
\endcode
// Both dense and sparse vectors can be used for a Kronecker product. It is possible to multiply
// two vectors with different element type, as long as the element types themselves can be
// multiplied.
//
// \n Previous: \ref scalar_multiplication Next: \ref vector_vector_division
*/
//*************************************************************************************************
//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a
// dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row
// vectors. The result is a dense row vector.
CompressedVector<int,columnVector> v7( div( v1, v2 ) ); // Alternative syntax
DynamicVector<double,rowVector> v8( div( v3, v4 ) ); // Alternative syntax
\endcode
// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
//
// \n \section outer_quotient Outer Quotient
// <hr>
//
// The division between a column vector and a row vector results in the outer quotient of the
// two vectors:
\code
blaze::StaticVector<double,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<double,rowVector> v2{ -1, 5, -2, 4 };
// Results in the matrix
//
// ( -2 0.4 -1 0.5 )
// A = ( -5 1 -2.5 1.25 )
// ( 1 -0.2 0.5 -0.25 )
//
   blaze::StaticMatrix<double,3UL,4UL> M1 = v1 / v2;         // Outer quotient
   blaze::StaticMatrix<double,3UL,4UL> M2 = div( v1, v2 );   // Alternative syntax
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 5, -2, 4 };
blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) / v2;
\endcode
// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************
//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;
DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;
// ... Initialization of the matrix and the vector
DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication
DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication
DynamicVector<int,columnVector> v4 = mult( M1, v1 ); // Alternative syntax
DynamicVector<int,rowVector> v5 = mult( trans( v1 ), M1 ); // Alternative syntax
\endcode
// Note that the storage order of the matrix poses no restrictions on the operation. Also note,
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************
//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// \n \section schur_product Componentwise Multiplication / Schur Product
// <hr>
//
// Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns)
// via the modulo operator results in a componentwise multiplication (Schur product) of the two
// matrices:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 28UL, 35UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 % M2; // Schur product
DynamicMatrix<double> M4 = schur( M1, M2 ); // Alternative syntax
\endcode
// Both dense and sparse matrices can be used for a Schur product. The storage order of the two
// matrices poses no restrictions on the operation, all variations are possible. It is also
// possible to multiply two matrices with different element type, as long as the element types
// themselves can be multiplied.
//
//
// \n \section matrix_product Matrix Product
// <hr>
//
// The matrix/matrix product can be formulated exactly as in mathematical textbooks:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 * M2; // Matrix product
DynamicMatrix<double> M4 = mult( M1, M2 ); // Alternative syntax
\endcode
// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. It is also possible to multiply two matrices with different element type, as
// long as the element types themselves can be multiplied and added. Note however that the
// highest performance for a multiplication between two matrices can be expected for two
// matrices with the same scalar element type.
//
// In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper
// triangular, or diagonal, the computation can be optimized by explicitly declaring the
// multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by
// means of the \ref matrix_operations_declaration_operations :
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> M1, M2, M3;
// ... Initialization of the square matrices
M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric
M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian
M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular
M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular
M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal
\endcode
// Using a declaration operation on a multiplication expression can speed up the computation
// by a factor of 2. Note however that the caller of the according declaration operation takes
// full responsibility for the correctness of the declaration. Falsely declaring a multiplication
// as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined
// behavior!
//
//
// \n \section matrix_kronecker_product Kronecker Product
// <hr>
//
// The Kronecker product of two matrices can be computed via the \a kron() function:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 17UL, 11UL );
// ... Initialization of the matrices
CompressedMatrix<double> M3 = kron( M1, M2 );
\endcode
// Both dense and sparse matrices can be used for a Kronecker product. The storage order of the
// two matrices poses no restrictions on the operation, all variations are possible. It is also
// possible to multiply two matrices with different element type, as long as the element types
// themselves can be multiplied.
//
// \n Previous: \ref matrix_vector_multiplication Next: \ref bitwise_operations
*/
//*************************************************************************************************
//**Bitwise Operations*****************************************************************************
/*!\page bitwise_operations Bitwise Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following bitwise operations for vectors and matrices:
//
// <ul>
// <li> \ref bitwise_shift
// <ul>
// <li> \ref vector_vector_shift </li>
// <li> \ref matrix_matrix_shift </li>
// <li> \ref scalar_shift </li>
// </ul>
// </li>
// <li> \ref bitwise_and
// <ul>
// <li> \ref vector_vector_bitand </li>
// <li> \ref matrix_matrix_bitand </li>
// <li> \ref scalar_bitand </li>
// </ul>
// </li>
// <li> \ref bitwise_or
// <ul>
// <li> \ref vector_vector_bitor </li>
// <li> \ref matrix_matrix_bitor </li>
// <li> \ref scalar_bitor </li>
// </ul>
// </li>
// <li> \ref bitwise_xor
// <ul>
// <li> \ref vector_vector_bitxor </li>
// <li> \ref matrix_matrix_bitxor </li>
// <li> \ref scalar_bitxor </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref matrix_matrix_multiplication Next: \ref bitwise_shift
*/
//*************************************************************************************************
//**Bitwise Shift**********************************************************************************
/*!\page bitwise_shift Bitwise Shift
//
// \n \section vector_vector_shift Vector/Vector Shift
// <hr>
//
// Via the left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>())
// it is possible to perform an elementwise shift of a dense vector:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 << v2; // Elementwise left-shift of a dense column vector
v3 = v1 >> v2; // Elementwise right-shift of a dense column vector
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to shift vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 << v2; // Compilation error: Cannot shift a column vector by a row vector
v1 << trans( v2 ); // OK: Shifting a column vector by another column vector
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but
// shifting two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 << v2; // Vectorized left-shift of an unsigned int vector
\endcode
// \n \section matrix_matrix_shift Matrix/Matrix Shift
// <hr>
//
// The left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>())
// can also be used to perform an elementwise shift of a dense matrix:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 << M2; // Elementwise left-shift of a dense column-major matrix
M3 = M1 >> M2; // Elementwise right-shift of a dense column-major matrix
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 << M2; // Vectorized left-shift of an unsigned int matrix
\endcode
// \n \section scalar_shift Scalar Shift
// <hr>
//
// It is also possible to uniformly shift all elements of a dense vector or dense matrix by means
// of a scalar, which has the same effect as shifting by means of a uniform vector or matrix (see
// \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). In \b Blaze it is
// possible to use all built-in/fundamental data types except bool as scalar values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3, 2, 5, 4, 1, 6 };
// Uniform left-shift by one bit of all elements of v1; Results in
//
// ( 6, 4, 10, 8, 2, 12 )
//
blaze::DynamicVector<int> v2( v1 << 1U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3, 2, 5 },
{ 4, 1, 6 } };
// Uniform left-shift by one bit of all elements of M1; Results in
//
// ( 6, 4, 10 )
// ( 8, 2, 12 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 << 1U );
\endcode
// \n Previous: \ref bitwise_operations Next: \ref bitwise_and
*/
//*************************************************************************************************
//**Bitwise AND************************************************************************************
/*!\page bitwise_and Bitwise AND
//
// \n \section vector_vector_bitand Vector/Vector Bitwise AND
// <hr>
//
// Via the bitwise AND operator (i.e. operator&()) it is possible to perform an elementwise
// bitwise AND with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 & v2; // Elementwise bitwise AND of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 & v2; // Compilation error: Cannot AND a column vector and a row vector
v1 & trans( v2 ); // OK: Bitwise AND of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise AND of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 & v2; // Vectorized bitwise AND of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitand Matrix/Matrix Bitwise AND
// <hr>
//
// The bitwise AND operator (i.e. operator&()) can also be used to perform an elementwise bitwise
// AND with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 & M2; // Elementwise bitwise AND of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 & M2; // Vectorized bitwise AND of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitand Scalar Bitwise AND
// <hr>
//
// It is also possible to perform a bitwise AND between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise AND by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
// Perform a bitwise AND with all elements of v1; Results in
//
// ( 3, 2, 1, 0, 1, 2 )
//
blaze::DynamicVector<int> v2( v1 & 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
// Perform a bitwise AND with all elements of M1; Results in
//
// ( 3, 2, 1 )
// ( 0, 1, 2 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 & 3U );
\endcode
// \n Previous: \ref bitwise_shift Next: \ref bitwise_or
*/
//*************************************************************************************************
//**Bitwise OR*************************************************************************************
/*!\page bitwise_or Bitwise OR
//
// \n \section vector_vector_bitor Vector/Vector Bitwise OR
// <hr>
//
// Via the bitwise OR operator (i.e. operator|()) it is possible to perform an elementwise
// bitwise OR with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 | v2; // Elementwise bitwise OR of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 | v2; // Compilation error: Cannot OR a column vector and a row vector
v1 | trans( v2 ); // OK: Bitwise OR of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise OR of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 | v2; // Vectorized bitwise OR of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitor Matrix/Matrix Bitwise OR
// <hr>
//
// The bitwise OR operator (i.e. operator|()) can also be used to perform an elementwise bitwise
// OR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 | M2; // Elementwise bitwise OR of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 | M2; // Vectorized bitwise OR of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitor Scalar Bitwise OR
// <hr>
//
// It is also possible to perform a bitwise OR between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise OR by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
   // Perform a bitwise OR with all elements of v1; Results in
   //
   //    ( 3, 3, 7, 7, 3, 7 )
   //
//
blaze::DynamicVector<int> v2( v1 | 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
   // Perform a bitwise OR with all elements of M1; Results in
   //
   //    ( 3, 3, 7 )
   //    ( 7, 3, 7 )
   //
//
blaze::DynamicMatrix<unsigned int> M2( M1 | 3U );
\endcode
// \n Previous: \ref bitwise_and Next: \ref bitwise_xor
*/
//*************************************************************************************************
//**Bitwise XOR************************************************************************************
/*!\page bitwise_xor Bitwise XOR
//
// \n \section vector_vector_bitxor Vector/Vector Bitwise XOR
// <hr>
//
// Via the bitwise XOR operator (i.e. operator^()) it is possible to perform an elementwise
// bitwise XOR with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 ^ v2; // Elementwise bitwise XOR of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 ^ v2; // Compilation error: Cannot XOR a column vector and a row vector
v1 ^ trans( v2 ); // OK: Bitwise XOR of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise XOR of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 ^ v2; // Vectorized bitwise XOR of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitxor Matrix/Matrix Bitwise XOR
// <hr>
//
// The bitwise XOR operator (i.e. operator^()) can also be used to perform an elementwise bitwise
// XOR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 ^ M2; // Elementwise bitwise XOR of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 ^ M2; // Vectorized bitwise XOR of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitxor Scalar Bitwise XOR
// <hr>
//
// It is also possible to perform a bitwise XOR between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise XOR by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
// Perform a bitwise XOR with all elements of v1; Results in
//
// ( 0, 1, 6, 7, 2, 5 )
//
blaze::DynamicVector<int> v2( v1 ^ 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
// Perform a bitwise XOR with all elements of M1; Results in
//
// ( 0, 1, 6 )
// ( 7, 2, 5 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 ^ 3U );
\endcode
// \n Previous: \ref bitwise_or Next: \ref logical_operations
*/
//*************************************************************************************************
//**Logical Operations*****************************************************************************
/*!\page logical_operations Logical Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following logical operations for vectors and matrices:
//
// <ul>
// <li> \ref logical_not
// <ul>
// <li> \ref vector_vector_not </li>
// <li> \ref matrix_matrix_not </li>
// </ul>
// </li>
// <li> \ref logical_and
// <ul>
// <li> \ref vector_vector_and </li>
// <li> \ref matrix_matrix_and </li>
// </ul>
// </li>
// <li> \ref logical_or
// <ul>
// <li> \ref vector_vector_or </li>
// <li> \ref matrix_matrix_or </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref bitwise_xor Next: \ref logical_not
*/
//*************************************************************************************************
//**Logical NOT************************************************************************************
/*!\page logical_not Logical NOT
//
// \n \section vector_vector_not Vector/Vector Logical NOT
// <hr>
//
// Via the logical NOT operator (i.e. operator!()) it is possible to compute an elementwise
// logical NOT of a dense vector:
\code
blaze::DynamicVector<bool> v1( 5UL ), v2;
// ... Initializing the vectors
v2 = !v1; // Elementwise logical NOT of a dense column vector
\endcode
// \n \section matrix_matrix_not Matrix/Matrix Logical NOT
// <hr>
//
// The logical NOT operator (i.e. operator!()) can also be used to compute an elementwise logical
// NOT with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,rowMajor> M1( 7UL, 3UL ), M2;
// ... Initializing the matrices
M2 = !M1; // Elementwise logical NOT of a dense row-major matrix
\endcode
// \n Previous: \ref logical_operations Next: \ref logical_and
*/
//*************************************************************************************************
//**Logical AND************************************************************************************
/*!\page logical_and Logical AND
//
// \n \section vector_vector_and Vector/Vector Logical AND
// <hr>
//
// Via the logical AND operator (i.e. operator&&()) it is possible to compute an elementwise
// logical AND with dense vectors:
\code
blaze::DynamicVector<bool> v1( 5UL ), v3;
blaze::DynamicVector<bool> v2( 5UL );
// ... Initializing the vectors
v3 = v1 && v2; // Elementwise logical AND of two dense column vectors
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<bool,columnVector> v1( 5UL );
blaze::DynamicVector<bool,rowVector> v2( 5UL );
v1 && v2; // Compilation error: Cannot AND a column vector and a row vector
v1 && trans( v2 ); // OK: Logical AND of two column vectors
\endcode
// \n \section matrix_matrix_and Matrix/Matrix Logical AND
// <hr>
//
// The logical AND operator (i.e. operator&&()) can also be used to compute an elementwise logical
// AND with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 && M2; // Elementwise logical AND of two dense matrices
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that for best performance using two matrices with the
// same storage order is preferable.
//
// \n Previous: \ref logical_not Next: \ref logical_or
*/
//*************************************************************************************************
//**Logical OR*************************************************************************************
/*!\page logical_or Logical OR
//
// \n \section vector_vector_or Vector/Vector Logical OR
// <hr>
//
// Via the logical OR operator (i.e. operator||()) it is possible to perform an elementwise
// logical OR with dense vectors:
\code
blaze::DynamicVector<bool> v1( 5UL ), v3;
blaze::DynamicVector<bool> v2( 5UL );
// ... Initializing the vectors
v3 = v1 || v2; // Elementwise logical OR of two dense column vectors
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
   blaze::DynamicVector<bool,columnVector> v1( 5UL );
   blaze::DynamicVector<bool,rowVector> v2( 5UL );
v1 || v2; // Compilation error: Cannot OR a column vector and a row vector
v1 || trans( v2 ); // OK: Logical OR of two column vectors
\endcode
// \n \section matrix_matrix_or Matrix/Matrix Logical OR
// <hr>
//
// The logical OR operator (i.e. operator||()) can also be used to perform an elementwise logical
// OR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 || M2; // Elementwise logical OR of two dense matrices
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that for best performance using two matrices with the
// same storage order is preferable.
//
// \n Previous: \ref logical_and Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************
//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// For all possible operations \b Blaze tries to achieve maximum performance on a single CPU
// core. However, today's CPUs are not single core anymore, but provide several (homogeneous
// or heterogeneous) compute cores. In order to fully exploit the performance potential of a
// multicore CPU, computations have to be parallelized across all available cores of a CPU.
// For this purpose, \b Blaze provides four different shared memory parallelization techniques:
//
// - \ref hpx_parallelization
// - \ref cpp_threads_parallelization
// - \ref boost_threads_parallelization
// - \ref openmp_parallelization
//
// When any of the shared memory parallelization techniques is activated, all arithmetic
// operations on dense vectors and matrices (including additions, subtractions, multiplications,
// divisions, and all componentwise arithmetic operations) and most operations on sparse vectors
// and matrices are automatically run in parallel. However, in addition, \b Blaze provides means
// to enforce the serial execution of specific operations:
//
// - \ref serial_execution
//
// \n Previous: \ref logical_or Next: \ref hpx_parallelization
*/
//*************************************************************************************************
//**HPX Parallelization****************************************************************************
/*!\page hpx_parallelization HPX Parallelization
//
// \tableofcontents
//
//
// The first shared memory parallelization provided with \b Blaze is based on
// <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>.
//
//
// \n \section hpx_setup HPX Setup
// <hr>
//
// In order to enable the HPX-based parallelization, the following steps have to be taken: First,
// the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_HPX_THREADS ...
\endcode
// Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked.
// And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see
// the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a>
// for further details). These three actions will cause the \b Blaze library to automatically try
// to run all operations in parallel with the specified number of HPX threads.
//
// Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based,
// and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations
// are enabled in combination with the HPX thread parallelization.
//
// The number of threads used by the HPX backend has to be specified via the command line:
\code
... --hpx:threads 4 ...
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of HPX threads, the function will return the actual number of threads used by
// the HPX subsystem.
//
//
// \n \section hpx_configuration HPX Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given
// operation is large enough and exceeds a certain threshold the operation is executed in parallel.
// All thresholds related to the HPX-based parallelization are contained within the configuration
// file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the underlying system architecture
// and the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the HPX-based parallelization.
//
// \n Previous: \ref shared_memory_parallelization Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************
//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the according C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by
\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11
// threads are enabled on the command line, the HPX-based parallelization has priority and is
// preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the underlying system architecture
// and the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
// http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:
\code
int main()
{
// ... Using the C++11 thread parallelization of Blaze
shutDownThreads();
}
\endcode
// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed! Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref hpx_parallelization Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************
//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based
// on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode
// Second, the according Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have
// priority, i.e. are preferred in case either is enabled in combination with the Boost thread
// parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the underlying system architecture
// and the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the Boost thread parallelization.
//
// \n Previous: \ref cpp_threads_parallelization Next: \ref openmp_parallelization
*/
//*************************************************************************************************
//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// The fourth and final shared memory parallelization provided with \b Blaze is based on
// <a href="https://www.openmp.org">OpenMP</a>.
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:
\code
-fopenmp // GNU/Clang C++ compiler
-openmp // Intel C++ compiler
/openmp // Visual Studio
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads. Note however that the HPX-based, the C++11
// thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in
// case either is enabled in combination with the OpenMP thread parallelization.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable
\code
export OMP_NUM_THREADS=4 // Unix systems
set OMP_NUM_THREADS=4 // Windows systems
\endcode
// or via an explicit call to the \c omp_set_num_threads() function:
\code
omp_set_num_threads( 4 );
\endcode
// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the underlying system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:
\code
using blaze::columnVector;
const size_t N( 1000000UL );
blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );
// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// Performing a vector triad
a = b + c * d;
\endcode
// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:
\code
// ...
// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// ...
\endcode
// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP, threads are explicitly spawned via an OpenMP parallel directive:
\code
// Serial region, executed by a single thread
#pragma omp parallel
{
// Parallel region, executed by the specified number of threads
}
// Serial region, executed by a single thread
\endcode
// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
#pragma omp parallel
{
y1 = A * x;
y2 = B * x;
}
\endcode
// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = A * x;
#pragma omp section
y2 = B * x;
}
\endcode
// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = serial( A * x );
#pragma omp section
y2 = serial( B * x );
}
\endcode
// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref boost_threads_parallelization Next: \ref serial_execution
*/
//*************************************************************************************************
//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:
\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode
// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:
\code
using blaze::rowMajor;
using blaze::columnVector;
blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;
// ... Resizing and initialization
// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;
// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
y = A * c;
z = A * d;
}
// Parallel execution continued
// ...
\endcode
// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode
// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref openmp_parallelization Next: \ref serialization
*/
//*************************************************************************************************
//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people. The \b Blaze math serialization
// module provides the according functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
// - \ref vector_serialization
// - \ref matrix_serialization
//
// \n Previous: \ref serial_execution Next: \ref vector_serialization
*/
//*************************************************************************************************
//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:
\code
using blaze::columnVector;
using blaze::rowVector;
// Serialization of both vectors
{
blaze::StaticVector<double,5UL,rowVector> d;
blaze::CompressedVector<int,columnVector> s;
// ... Resizing and initialization
   // Creating an archive that writes into the file "vectors.blaze"
blaze::Archive<std::ofstream> archive( "vectors.blaze" );
// Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s;
}
// Reconstitution of both vectors
{
blaze::DynamicVector<double,rowVector> d1;
blaze::DynamicVector<int,rowVector> d2;
// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );
// Reconstituting the former d vector into d1. Note that it is possible to reconstitute
   // the vector into a different kind of vector (StaticVector -> DynamicVector), but that
// the type of elements has to be the same.
archive >> d1;
   // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
   // a sparse vector as a dense vector (also the reverse is possible) and that a column vector
   // can be reconstituted as row vector (and vice versa). Note however that also in this case
   // the type of elements is the same!
   archive >> d2;
}
\endcode
// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:
\code
// Serialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// ... Resizing and initialization
   // Creating an archive that writes into the file "vector.blaze"
blaze::Archive<std::ofstream> archive( "vector.blaze" );
// Serialization of the vector into the archive
archive << vec;
}
// Deserialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// Creating an archive that reads from the file "vector.blaze"
blaze::Archive<std::ifstream> archive( "vector.blaze" );
// Reconstitution of the vector from the archive
archive >> vec;
}
\endcode
// As the examples demonstrate, the vector serialization offers enormous flexibility. However,
// several actions result in errors:
//
// - vectors cannot be reconstituted as matrices (and vice versa)
// - the element type of the serialized and reconstituted vector must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref serialization Next: \ref matrix_serialization
*/
//*************************************************************************************************
//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
// Serialization of both matrices
{
blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
blaze::CompressedMatrix<int,columnMajor> S;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrices.blaze"
blaze::Archive<std::ofstream> archive( "matrices.blaze" );
// Serialization of both matrices into the same archive. Note that D lies before S!
archive << D << S;
}
// Reconstitution of both matrices
{
blaze::DynamicMatrix<double,rowMajor> D1;
blaze::DynamicMatrix<int,rowMajor> D2;
// Creating an archive that reads from the file "matrices.blaze"
blaze::Archive<std::ifstream> archive( "matrices.blaze" );
// Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
// the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
// the type of elements has to be the same.
archive >> D1;
// Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
// a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
// matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
// in this case the type of elements is the same!
archive >> D2;
}
\endcode
// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:
\code
// Serialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrix.blaze"
blaze::Archive<std::ofstream> archive( "matrix.blaze" );
// Serialization of the matrix into the archive
archive << mat;
}
// Deserialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// Creating an archive that reads from the file "matrix.blaze"
blaze::Archive<std::ifstream> archive( "matrix.blaze" );
// Reconstitution of the matrix from the archive
archive >> mat;
}
\endcode
// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
// - matrices cannot be reconstituted as vectors (and vice versa)
// - the element type of the serialized and reconstituted matrix must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
// of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization Next: \ref customization \n
*/
//*************************************************************************************************
//**Customization**********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be
// necessary to adapt the library to specific requirements. The following three pages explain
// how to customize the \b Blaze library to your own needs:
//
// - \ref configuration_files
// - \ref vector_and_matrix_customization
// - \ref grouping_tagging
// - \ref error_reporting_customization
//
// \n Previous: \ref matrix_serialization Next: \ref configuration_files
*/
//*************************************************************************************************
//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose
// \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory,
// which provide ample opportunity to customize internal settings, behavior, and thresholds.
// This chapter explains the most important of these configuration files. For a complete
// overview of all customization opportunities, please go to the configuration files in the
// <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
//
//
// \n \section transpose_flag Default Vector Storage
// <hr>
//
// The \b Blaze default is that all vectors are created as column vectors (if not specified
// explicitly):
\code
blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector
\endcode
// The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default
// vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library.
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
\endcode
// Alternatively the default transpose flag can be specified via command line or by defining
// this symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_TRANSPOSE_FLAG=blaze::columnVector ...
\endcode
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector.
//
//
// \n \section storage_order Default Matrix Storage
// <hr>
//
// Matrices are by default created as row-major matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix
\endcode
// The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default
// matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order
// for all matrices of the \b Blaze library can be specified.
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
\endcode
// Alternatively the default storage order can be specified via command line or by defining
// this symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_STORAGE_ORDER=blaze::rowMajor ...
\endcode
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor.
//
//
// \n \section blas_mode BLAS Mode
// <hr>
//
// In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can
// be configured to use a BLAS library. Via the following compilation switch in the configuration
// file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled:
\code
#define BLAZE_BLAS_MODE 1
\endcode
// By default, \b Blaze assumes a 32-bit BLAS library. Via the \c BLAZE_BLAS_IS_64BIT compilation
// switch, the 64-bit BLAS mode can be selected:
\code
#define BLAZE_BLAS_IS_64BIT 1
\endcode
// Note that the \c BLAZE_BLAS_IS_64BIT switch also has an effect on the \ref lapack_functions.
// Please also note that it might additionally be necessary to use a compilation switch to put
// the BLAS/LAPACK library into 64-bit mode (e.g. \c MKL_ILP64 for the Intel MKL library).
//
// In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL
// switch should be activated to prevent \b Blaze from parallelizing on its own:
\code
#define BLAZE_BLAS_IS_PARALLEL 1
\endcode
// Additionally, it is possible to specify the name of the BLAS include file via the
// \c BLAZE_BLAS_INCLUDE_FILE switch. The default setting is <tt><cblas.h></tt>:
\code
#define BLAZE_BLAS_INCLUDE_FILE <cblas.h>
\endcode
// Alternatively, all settings can be specified via command line or by defining the symbols
// manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_BLAS_MODE=1 -DBLAZE_BLAS_IS_64BIT=1 -DBLAZE_BLAS_IS_PARALLEL=1 -DBLAZE_BLAS_INCLUDE_FILE='<cblas.h>' ...
\endcode
\code
#define BLAZE_BLAS_MODE 1
#define BLAZE_BLAS_IS_64BIT 1
#define BLAZE_BLAS_IS_PARALLEL 1
#define BLAZE_BLAS_INCLUDE_FILE <cblas.h>
#include <blaze/Blaze.h>
\endcode
// In case no BLAS library is available, \b Blaze will still work and will not be reduced in
// functionality, but performance may be limited.
//
//
// \n \section cache_size Cache Size
// <hr>
//
// The optimization of several \b Blaze compute kernels depends on the cache size of the target
// architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal
// speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the
// <tt>./blaze/config/CacheSize.h</tt> configuration file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
\endcode
// The cache size can also be specified via command line or by defining this symbol manually
// before including any \b Blaze header file:
\code
g++ ... -DBLAZE_CACHE_SIZE=3145728 ...
\endcode
\code
#define BLAZE_CACHE_SIZE 3145728
#include <blaze/Blaze.h>
\endcode
// \n \section vectorization Vectorization
// <hr>
//
// In order to achieve maximum performance and to exploit the compute power of a target platform
// the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or
// AVX-512 intrinsics, depending on which instruction set is available. However, it is possible
// to disable the vectorization entirely by the compile time switch in the configuration file
// <tt>./blaze/config/Vectorization.h</tt>:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_USE_VECTORIZATION=1 ...
\endcode
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is
// disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for
// the operations. Note that deactivating the vectorization may pose a severe performance
// limitation for a large number of operations!
//
//
// \n \section sleef Sleef
// <hr>
//
// For several complex operations \b Blaze can make use of the Sleef library for vectorization
// (https://github.com/shibatch/sleef). This compilation switch enables/disables the vectorization
// by means of Sleef. In case the switch is set to 1, \b Blaze uses Sleef for instance for the
// vectorized computation of trigonometric functions (i.e. \c sin(), \c cos(), \c tan(), etc.)
// and exponential functions (i.e. \c exp(), \c log(), ...).
\code
#define BLAZE_USE_SLEEF 1
\endcode
// It is also possible to enable/disable Sleef vectorization via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_USE_SLEEF=1 ...
\endcode
\code
#define BLAZE_USE_SLEEF 1
#include <blaze/Blaze.h>
\endcode
// \n \section thresholds Thresholds
// <hr>
//
// For many computations \b Blaze distinguishes between small and large vectors and matrices.
// This separation is especially important for the parallel execution of computations, since
// the use of several threads only pays off for sufficiently large vectors and matrices.
// Additionally, it also enables \b Blaze to select kernels that are optimized for a specific
// size.
//
// In order to distinguish between small and large data structures \b Blaze provides several
// thresholds that can be adapted to the characteristics of the target platform. For instance,
// the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom
// \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels
// for large multiplications. All thresholds, including the thresholds for the OpenMP- and
// thread-based parallelization, are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
//
// \n \section alignment Alignment
// <hr>
//
// For performance reasons, the vector types \ref vector_types_static_vector and
// \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and
// \ref matrix_types_hybrid_matrix by default make use of aligned memory. Via the configuration
// file <tt>./blaze/config/Alignment.h</tt> it is possible to define the default alignment flag:
\code
#define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned
\endcode
// Alternatively it is possible to set the default alignment flag via command line or by defining
// this symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ...
\endcode
\code
#define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector,
// \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix
// use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned
// memory. Note however that disabling alignment can considerably reduce the performance of all
// operations with these vector and matrix types!
//
//
// \n \section padding Padding
// <hr>
//
// By default the \b Blaze library uses padding for the vector types \ref vector_types_static_vector
// and \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and
// \ref matrix_types_hybrid_matrix in order to achieve maximum performance in all operations. Due
// to padding, the proper alignment of data elements can be guaranteed and the need for remainder
// loops is minimized. However, on the downside padding introduces an additional memory overhead,
// which can be large depending on the used data type.
//
// The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch that
// can be used to define the default padding flag:
\code
#define BLAZE_DEFAULT_PADDING_FLAG blaze::padded
\endcode
// Alternatively it is possible to define the default padding flag via command line or by defining
// this symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ...
\endcode
\code
#define BLAZE_DEFAULT_PADDING_FLAG blaze::padded
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled
// for \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix
// and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by
// default disabled. Note however that disabling padding can considerably reduce the performance
// of all dense vector and matrix operations!
//
//
// \n \section streaming Streaming (Non-Temporal Stores)
// <hr>
//
// For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide
// a significant performance advantage of about 20%. However, this advantage is only in effect in
// case the memory bandwidth of the target architecture is maxed out. If the target architecture's
// memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance
// instead of increasing it.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate streaming:
\code
#define BLAZE_USE_STREAMING 1
\endcode
// Alternatively streaming can be (de-)activated via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_USE_STREAMING=1 ...
\endcode
\code
#define BLAZE_USE_STREAMING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is
// disabled. It is recommended to consult the target architecture's white papers to decide whether
// streaming is beneficial or hurtful for performance.
//
//
// \n Previous: \ref customization Next: \ref vector_and_matrix_customization \n
*/
//*************************************************************************************************
//**Customization of Vectors and Matrices**********************************************************
/*!\page vector_and_matrix_customization Customization of Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section custom_data_members Custom Data Members
// <hr>
//
// So far the \b Blaze library does not provide a lot of flexibility to customize the data
// members of existing \ref vector_types and \ref matrix_types. However, to some extent it is
// possible to customize vectors and matrices by inheritance. The following example gives an
// impression on how to create a simple variation of \ref matrix_types_custom_matrix, which
// automatically takes care of acquiring and releasing custom memory.
\code
template< typename Type // Data type of the matrix
, bool SO = defaultStorageOrder > // Storage order
class MyCustomMatrix
: public CustomMatrix< Type, unaligned, unpadded, SO >
{
public:
explicit inline MyCustomMatrix( size_t m, size_t n )
: CustomMatrix<Type,unaligned,unpadded,SO>()
, array_( new Type[m*n] )
{
this->reset( array_.get(), m, n );
}
private:
std::unique_ptr<Type[]> array_;
};
\endcode
// Please note that this is a simplified example with the intent to show the general approach.
// The number of constructors, the memory acquisition, and the kind of memory management can of
// course be adapted to specific requirements. Also, please note that since none of the \b Blaze
// vectors and matrices have virtual destructors polymorphic destruction cannot be used.
//
//
// \n \section custom_operations Custom Operations
// <hr>
//
// There are two approaches to extend \b Blaze with custom operations. First, the \c map()
// functions provide the possibility to execute componentwise custom operations on vectors and
// matrices. Second, it is possible to add customized free functions.
//
// \n \subsection custom_operations_map The map() Functions
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors and matrices. The unary \c map() function can be used to apply a custom
// operation on each single element of a dense vector or matrix or each non-zero element of a
// sparse vector or matrix. For instance, the following example demonstrates a custom square
// root computation on a dense matrix:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors or two dense matrices. The following example demonstrates the merging of
// two matrices of double precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( ( 2.1, 0.3) (-4.2, 1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// These examples demonstrate the most convenient way of defining a unary custom operation by
// passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom
// functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
};
B = map( A, Sqrt() );
\endcode
// In order for the functor to work in a call to \c map() it must define a function call operator,
// which accepts arguments of the type of the according vector or matrix elements.
//
// Although the operation is automatically parallelized depending on the size of the vector or
// matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load()
// function can be added to the functor, which handles the vectorized computation. Depending on
// the data type this function is passed one of the following \b Blaze SIMD data types:
//
// <ul>
// <li>SIMD data types for fundamental data types
// <ul>
// <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li>
// <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li>
// <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li>
// <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li>
// <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li>
// <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li>
// </ul>
// </li>
// <li>SIMD data types for complex data types
// <ul>
// <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li>
// <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li>
// </ul>
// </li>
// </ul>
//
// All SIMD types provide the \c value data member for a direct access to the underlying intrinsic
// data element. In the following example, this intrinsic element is passed to the AVX function
// \c _mm256_sqrt_pd():
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
SIMDdouble load( const SIMDdouble& a ) const
{
return _mm256_sqrt_pd( a.value );
}
};
\endcode
// In this example, whenever vectorization is generally applicable, the \c load() function is
// called instead of the function call operator for as long as the number of remaining elements
// is larger-or-equal to the width of the packed SIMD type. In all other cases (which also
// includes peel-off and remainder loops) the scalar operation is used.
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
template< typename T >
T load( const T& a ) const
{
return _mm256_sqrt_pd( a.value );
}
template< typename T >
static constexpr bool simdEnabled() {
#if defined(__AVX__)
return true;
#else
return false;
#endif
}
};
\endcode
// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation, in case the function
// returns \c false, \c load() is neither called nor instantiated.
//
// By default the \c map() function uses peel-off and remainder loops if the number of elements is
// not a multiple of the width of the packed SIMD type. However, all dense vector and matrix types
// in \b Blaze provide padding as an optimization. In case the custom operation preserves the
// value zero of the padding elements, it is possible to omit the peel-off and remainder loops,
// include the padding elements in the computation and by that increase performance. For that
// purpose the \c paddingEnabled() function can be added to the functor:
\code
struct Sqrt
{
// ...
static constexpr bool paddingEnabled() { return true; }
};
\endcode
// Also the \c paddingEnabled() function must be a \c static, \c constexpr function and must
// return whether padding elements can be used in the custom operation. In case the function
// returns \c true, the padding elements are used during a vectorized operation, in case the
// function returns \c false, the padding elements are not used.
//
// Note that this is a simplified example that is only working when used for dense vectors and
// matrices with double precision floating point elements. The following code shows the complete
// implementation of the according functor that is used within the \b Blaze library. The \b Blaze
// \c Sqrt functor is working for all data types that are providing a square root operation:
\code
namespace blaze {
struct Sqrt
{
template< typename T >
BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
{
return sqrt( a );
}
template< typename T >
static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }
static constexpr bool paddingEnabled() { return true; }
template< typename T >
BLAZE_ALWAYS_INLINE auto load( const T& a ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T );
return sqrt( a );
}
};
} // namespace blaze
\endcode
// The same approach can be taken for binary custom operations. The following code demonstrates
// the \c Min functor of the \b Blaze library, which is working for all data types that provide
// a \c min() operation:
\code
struct Min
{
explicit inline Min()
{}
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const
{
return min( a, b );
}
template< typename T1, typename T2 >
static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; }
static constexpr bool paddingEnabled() { return true; }
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 );
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 );
return min( a, b );
}
};
\endcode
// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n \subsection custom_operations_free_functions Free Functions
//
// In order to extend \b Blaze with new functionality it is possible to add free functions. Free
// functions can be used either as wrappers around calls to the map() function or to implement
// general, non-componentwise operations. The following two examples will demonstrate both ideas.
//
// The first example shows the \c setToZero() function, which resets a sparse matrix to zero
// without affecting the sparsity pattern. It is implemented as a convenience wrapper around
// the map() function:
\code
template< typename MT // Type of the sparse matrix
, bool SO > // Storage order
void setToZero( blaze::SparseMatrix<MT,SO>& mat )
{
(~mat) = blaze::map( ~mat, []( const auto& value ){ return decltype(value){}; } );
}
\endcode
// The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and
// provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the
// <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a>
// it also enables a conversion back to the actual type. This downcast is performed via the tilde
// operator (i.e. \c operator~()). The template parameter \c SO represents the storage order
// (blaze::rowMajor or blaze::columnMajor) of the matrix.
//
// The second example shows the \c countZeros() function, which counts the number of values, which
// are exactly zero, in a dense, row-major matrix:
\code
template< typename MT >
size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat )
{
const size_t M( (~mat).rows() );
const size_t N( (~mat).columns() );
size_t count( 0UL );
for( size_t i=0UL; i<M; ++i ) {
for( size_t j=0UL; j<N; ++j ) {
if( blaze::isDefault<strict>( (~mat)(i,j) ) )
++count;
}
}
return count;
}
\endcode
// The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again,
// it is possible to perform the conversion to the actual type via the tilde operator.
//
// The following two listings show the declarations of all vector and matrix base classes, which
// can be used for custom free functions:
\code
template< typename VT // Concrete type of the dense or sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class Vector;
template< typename VT // Concrete type of the dense vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class DenseVector;
template< typename VT // Concrete type of the sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class SparseVector;
\endcode
\code
template< typename MT // Concrete type of the dense or sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class Matrix;
template< typename MT // Concrete type of the dense matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class DenseMatrix;
template< typename MT // Concrete type of the sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class SparseMatrix;
\endcode
// \n \section custom_data_types Custom Data Types
// <hr>
//
// \subsection custom_data_types_introduction Introduction
//
// The \b Blaze library is not restricted to integral, floating point and complex data types
// (called numeric types in \b Blaze), but it supports custom data types. For instance, the
// following example demonstrates that it is possible to use \c std::string as data type:
\code
blaze::DynamicVector<std::string> a{ "Hello, ", "Blaze " , "Expression" };
blaze::DynamicVector<std::string> b{ "World" , "Library", " Templates" };
const auto c( evaluate( a + b ) );
std::cout << "c =\n" << c << "\n\n";
const std::string maxString( max( c ) );
std::cout << "maxString = " << std::quoted(maxString) << "\n";
\endcode
// Output:
\code
c =
( Hello, World )
( Blaze Library )
( Expression Templates )
maxString = "Hello, World"
\endcode
// \b Blaze tries hard to make the use of custom data types as convenient, easy and intuitive as
// possible. In order to work flawlessly with \b Blaze, custom data types are required to provide
// a certain interface (depending on the operations that the type is used for). The following
// sections give an overview of the necessary steps to enable the use of the hypothetical custom
// data type \c custom::double_t for vector and matrix operations.
\code
namespace custom {
struct double_t
{
constexpr double_t() = default;
constexpr double_t( double i ) : value( i ) {}
double value{};
};
} // namespace custom
\endcode
// \subsection custom_data_types_arithmetic_operations Arithmetic Operations
//
// The \b Blaze library assumes that a custom data type provides \c operator<<() for streaming,
// \c operator+=() and \c operator+() for additions (which for instance includes additions inside
// matrix/vector multiplications, matrix/matrix multiplications, reduction or norm operations),
// \c operator-=() and \c operator-() for subtractions, \c operator*=() and \c operator*() for
// multiplications and \c operator/=() and \c operator/() for divisions:
\code
namespace custom {
constexpr double_t& operator+=( double_t& lhs, double_t rhs ) noexcept { lhs.value += rhs.value; return lhs; }
constexpr double_t& operator-=( double_t& lhs, double_t rhs ) noexcept { lhs.value -= rhs.value; return lhs; }
constexpr double_t& operator*=( double_t& lhs, double_t rhs ) noexcept { lhs.value *= rhs.value; return lhs; }
constexpr double_t& operator/=( double_t& lhs, double_t rhs ) noexcept { lhs.value /= rhs.value; return lhs; }
constexpr double_t operator+( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value + rhs.value }; }
constexpr double_t operator-( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value - rhs.value }; }
constexpr double_t operator*( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value * rhs.value }; }
constexpr double_t operator/( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value / rhs.value }; }
inline std::ostream& operator<<( std::ostream& os, double_t d )
{
return os << d.value;
}
} // namespace custom
\endcode
// Example:
\code
int main()
{
blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 };
blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 };
std::cout << "a + b =\n" << ( a + b ) << "\n";
std::cout << "a * b =\n" << ( a * b ) << "\n";
std::cout << "sum(a) = " << sum(a) << "\n"
<< "prod(a) = " << prod(a) << "\n";
}
\endcode
// Output:
\code
a + b =
( 1.1 )
( 2.2 )
( 3.3 )
( 4.4 )
a * b =
( 0.1 )
( 0.4 )
( 0.9 )
( 1.6 )
sum(a) = 10
prod(a) = 24
\endcode
// Note that similar steps are necessary if several custom data types are combined (as for instance
// \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to
// be taken into account:
\code
custom::double_t operator+( const custom::double_t& a, const custom::float_t& b );
custom::double_t operator+( const custom::float_t& a, const custom::double_t& b );
// ...
\endcode
// Please note that only built-in data types apply for vectorization and thus custom data types
// cannot achieve maximum performance!
//
// \subsection custom_data_types_relational_operations Relational Operations
//
// In order to compare the element type, \b Blaze expects the equality operator (i.e. \c operator==())
// and the inequality operator (i.e. \c operator!=()). Alternatively it is possible to provide an
// \c equal() function, which distinguishes between strict and relaxed comparison:
\code
namespace custom {
constexpr bool operator==( double_t lhs, double_t rhs ) noexcept { return lhs.value == rhs.value; }
constexpr bool operator!=( double_t lhs, double_t rhs ) noexcept { return !( lhs == rhs ); }
template< blaze::RelaxationFlag RF >
constexpr bool equal( double_t lhs, double_t rhs ) noexcept { return blaze::equal<RF>( lhs.value, rhs.value ); }
} // namespace custom
\endcode
// Example:
\code
int main()
{
blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 };
blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 };
std::cout << "a == b: " << ( a == b ) << "\n"
<< "a != b: " << ( a != b ) << "\n";
}
\endcode
// Output:
\code
a == b: 0
a != b: 1
\endcode
// \subsection custom_data_types_elementwise_operations Elementwise Operations
//
// For the different kinds of elementwise operations on vectors and matrices (\c abs(), \c sin(),
// \c cos(), \c sqrt(), \c log(), \c exp(), \c min(), \c max(), ...), the custom type is required
// to provide the according function overload. Note that the \c sqrt() operation may also be
// required for several norm computations. Also, for any inversion operation, the type is required
// to support the \c inv() function:
\code
namespace custom {
inline double_t abs ( double_t d ) noexcept { return double_t{ std::abs ( d.value ) }; }
inline double_t sin ( double_t d ) noexcept { return double_t{ std::sin ( d.value ) }; }
inline double_t cos ( double_t d ) noexcept { return double_t{ std::cos ( d.value ) }; }
inline double_t sqrt( double_t d ) noexcept { return double_t{ std::sqrt( d.value ) }; }
inline double_t log ( double_t d ) noexcept { return double_t{ std::log ( d.value ) }; }
inline double_t exp ( double_t d ) noexcept { return double_t{ std::exp ( d.value ) }; }
constexpr double_t inv ( double_t d ) noexcept { return double_t{ 1.0/d.value }; }
constexpr double_t min( double_t lhs, double_t rhs ) noexcept { return double_t{ blaze::min( lhs.value, rhs.value ) }; }
constexpr double_t max( double_t lhs, double_t rhs ) noexcept { return double_t{ blaze::max( lhs.value, rhs.value ) }; }
} // namespace custom
\endcode
// Example:
\code
int main()
{
blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 };
blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 };
std::cout << "abs(a) =\n" << abs(a) << "\n";
std::cout << "sin(a) =\n" << sin(a) << "\n";
std::cout << "cos(a) =\n" << cos(a) << "\n";
std::cout << "sqrt(a) =\n" << sqrt(a) << "\n";
std::cout << "log(a) =\n" << log(a) << "\n";
std::cout << "exp(a) =\n" << exp(a) << "\n\n";
std::cout << "min(a) =\n" << min(a) << "\n";
std::cout << "max(a) =\n" << max(a) << "\n\n";
std::cout << "min(a,b) =\n" << min(a,b) << "\n";
std::cout << "max(a,b) =\n" << max(a,b) << "\n";
std::cout << "norm(a) = " << norm(a) << "\n";
}
\endcode
// Output:
\code
abs(a) =
( 1 )
( 2 )
( 3 )
( 4 )
sin(a) =
( 0.841471 )
( 0.909297 )
( 0.14112 )
( -0.756802 )
cos(a) =
( 0.540302 )
( -0.416147 )
( -0.989992 )
( -0.653644 )
sqrt(a) =
( 1 )
( 1.41421 )
( 1.73205 )
( 2 )
log(a) =
( 0 )
( 0.693147 )
( 1.09861 )
( 1.38629 )
exp(a) =
( 2.71828 )
( 7.38906 )
( 20.0855 )
( 54.5982 )
min(a) = 1
max(a) = 4
min(a,b) =
( 0.1 )
( 0.2 )
( 0.3 )
( 0.4 )
max(a,b) =
( 1 )
( 2 )
( 3 )
( 4 )
norm(a) = 5.47723
\endcode
// \subsection custom_data_types_adaptors Adaptors
//
// If the custom data type is used in the context of the HermitianMatrix, UniLowerMatrix, or
// UniUpperMatrix adaptors, it will be necessary to provide overloads of the \c isZero(),
// \c isOne(), and \c isReal() functions:
\code
namespace custom {
template< blaze::RelaxationFlag RF >
constexpr bool isZero( double_t d ) { return blaze::isZero<RF>( d.value ); }
template< blaze::RelaxationFlag RF >
constexpr bool isOne ( double_t d ) { return blaze::isOne<RF> ( d.value ); }
template< blaze::RelaxationFlag RF >
constexpr bool isReal( double_t d ) { MAYBE_UNUSED( d ); return true; }
} // namespace custom
\endcode
// Example:
\code
int main()
{
blaze::UniLowerMatrix< blaze::DynamicMatrix<custom::double_t> > L
{ { 1.0, 0.0, 0.0 },
{ 2.0, 1.0, 0.0 },
{ 3.0, 4.0, 1.0 } };
blaze::UniUpperMatrix< blaze::DynamicMatrix<custom::double_t> > U
{ { 1.0, 2.0, 3.0 },
{ 0.0, 1.0, 4.0 },
{ 0.0, 0.0, 1.0 } };
const auto A( evaluate( L * U ) );
std::cout << "A =\n" << A << "\n";
}
\endcode
// Output:
\code
A =
( 1 2 3 )
( 2 5 10 )
( 3 10 26 )
\endcode
// \n Previous: \ref configuration_files Next: \ref grouping_tagging \n
*/
//*************************************************************************************************
//**Grouping/Tagging*******************************************************************************
/*!\page grouping_tagging Grouping/Tagging
//
// \tableofcontents
//
//
// \n \section grouping_tagging_tagging_and_groups Tagging and Groups
// <hr>
//
// Sometimes it may be desirable to separate two or more distinct groups of vectors and matrices,
// for instance in order to allow operations only within a group and to prevent operations across
// groups. This goal can be achieved by means of tags. All vector and matrix classes provide a
// template parameter to specify a tag (for instance, the fourth template parameter for
// blaze::DynamicVector and the sixth template parameter for blaze::StaticVector):
\code
template< typename Type, bool TF, typename Alloc, typename Tag >
class DynamicVector;
template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag >
class StaticVector;
\endcode
// By default, all vectors and matrices are associated with blaze::Group0 (i.e. the tag is set
// to blaze::Group0). However, it is possible to explicitly associate vectors and matrices with
// different groups:
\code
using blaze::DynamicVector;
using blaze::AlignedAllocator;
using blaze::Group0;
using blaze::Group1;
using blaze::columnVector;
DynamicVector<int,columnVector,AlignedAllocator<int>,Group0> a0, b0;
DynamicVector<int,columnVector,AlignedAllocator<int>,Group1> a1, b1;
a0 + b0; // Compiles, a0 and b0 are in the same group (Group0)
a1 + b1; // Compiles, a1 and b1 are in the same group (Group1)
a0 + b1; // Compilation error: a0 and b1 are not in the same group
\endcode
// All vectors or matrices that are associated with the same group can be freely combined with any
// other vector or matrix from the same group. The attempt to combine vectors and matrices from
// different groups results in a compilation error.
//
//
// \n \section grouping_tagging_creating_new_groups Creating New Groups
// <hr>
//
// \b Blaze provides the tags for the ten predefined groups blaze::Group0 through blaze::Group9.
// In order to create further groups, all that needs to be done is to create new instances of the
// blaze::GroupTag class template:
\code
using Group10 = blaze::GroupTag<10>;
using Group11 = blaze::GroupTag<11>;
// ... further groups
\endcode
// All groups based on the blaze::GroupTag class template will be treated as separate groups just
// as the ten predefined groups.
//
//
// \n \section grouping_tagging_custom_tags Custom Tags
// <hr>
//
// Sometimes it is not enough to separate vectors and matrices into different groups, but it is
// required to define the interaction between different groups. This situation for instance occurs
// if a vector or matrix is associated with a physical quantity. This problem can be solved by
// using custom tags. The following example gives an impression on how to define the physics on
// meters (represented by the \c Meter tag) and seconds (represented by the \c Second tag):
\code
struct Meter {}; // Definition of the 'Meter' tag
struct Second {}; // Definition of the 'Second' tag
struct SquareMeter {}; // Definition of the 'SquareMeter' tag
struct MeterPerSecond {}; // Definition of the 'MeterPerSecond' tag
\endcode
// The \c Meter and \c Second tags are not associated with the blaze::GroupTag class template. For
// that reason, by default, it is not possible to perform any operation on an accordingly tagged
// vector or matrix. All required operations need to be declared explicitly in order to specify
// the resulting tag of an operation. In the following code example, this happens by declaring
// both the addition for the \c Meter tag and the \c Second tag, the multiplication between two
// \c Meter tags and the division between \c Meter and \c Second. Note that it is enough to
// declare the operations, it is not necessary to define them!
\code
Meter operator+( Meter , Meter ); // Enabling addition between 'Meter'
Second operator+( Second, Second ); // Enabling addition between 'Second'
SquareMeter operator*( Meter , Meter ); // Enabling multiplication between 'Meter'
MeterPerSecond operator/( Meter , Second ); // Enabling division between 'Meter' and 'Second'
\endcode
// With these declarations it is now possible to add meters and seconds, but not to subtract them
// (no subtraction operator was declared). Also, it is possible to multiply meters and to divide
// meters and seconds:
\code
const DynamicVector<int,rowVector,AlignedAllocator<int>,Meter> m1{ 1, 2, 3 };
const DynamicVector<int,rowVector,AlignedAllocator<int>,Meter> m2{ 4, 5, 6 };
const DynamicVector<int,rowVector,AlignedAllocator<int>,Second> s1{ 1, 2, 3 };
const DynamicVector<int,rowVector,AlignedAllocator<int>,Second> s2{ 4, 5, 6 };
m1 + m2; // Compiles and results in vector tagged with 'Meter'
s1 + s2; // Compiles and results in vector tagged with 'Second'
m1 - m2; // Compilation error: No subtraction defined for 'Meter'!
m1 + s2; // Compilation error: No addition between 'Meter' and 'Second' defined!
m1 * m2; // Compiles and results in vector tagged with 'SquareMeter'
m1 / s1; // Compiles and results in vector tagged with 'MeterPerSecond'
\endcode
// At this point it is possible to use the \c pow2() function for vectors and matrices tagged with
// \c Meter since \c pow2() is based on multiplication, which has already been declared. However,
// it is not possible to use the \c abs() function:
\code
pow2( m1 ); // Compiles and results in vector tagged with 'SquareMeter'
abs ( m1 ); // Compilation error: No 'abs()' declared for the 'Meter' tag
\endcode
// In order to enable the \c abs() function it also needs to be explicitly declared for the
// \c Meter tag:
\code
Meter abs( Meter ); // Enabling the 'abs()' function on 'Meter'
abs ( m1 ); // Compiles and results in vector tagged with 'Meter'
\endcode
// \n Previous: \ref vector_and_matrix_customization Next: \ref error_reporting_customization \n
*/
//*************************************************************************************************
//**Customization of the Error Reporting Mechanism*************************************************
/*!\page error_reporting_customization Customization of the Error Reporting Mechanism
//
// \tableofcontents
//
//
// \n \section error_reporting_background Background
// <hr>
//
// The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under
// special circumstances exceptions may not be the mechanism of choice and a different error
// reporting mechanism may be desirable. For this reason, \b Blaze provides several macros,
// which enable the customization of the error reporting mechanism. Via these macros it is
// possible to replace the standard exceptions by some other exception type or a completely
// different approach to report errors.
//
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
// <hr>
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to
// replace it by some other means to signal failure. The primary macro for this purpose is the
// \c BLAZE_THROW macro:
\code
#define BLAZE_THROW( EXCEPTION ) \
throw EXCEPTION
\endcode
// This macro represents the default mechanism of the \b Blaze library to report errors of any
// kind. In order to customize the error reporting mechanism all that needs to be done is to
// define the macro prior to including any \b Blaze header file. This will cause the \b Blaze
// specific mechanism to be overridden. The following example demonstrates this by replacing
// exceptions by a call to a \c log() function and a direct call to abort:
\code
#define BLAZE_THROW( EXCEPTION ) \
log( "..." ); \
abort()
#include <blaze/Blaze.h>
\endcode
// Doing this will trigger a call to \c log() and an abort instead of throwing an exception
// whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to
// throw an exception. Also note that it is recommended to define the macro such that a subsequent
// semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special
// conditions and environments. However, the customization of the error reporting mechanism via
// this macro can have a significant effect on the library. Thus be advised to use the macro
// with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
// <hr>
//
// In addition to the customization of the entire error reporting mechanism it is also possible
// to customize the type of exceptions being thrown. This can be achieved by customizing any
// number of the following macros:
\code
#define BLAZE_THROW_BAD_ALLOC \
BLAZE_THROW( std::bad_alloc() )
#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
BLAZE_THROW( std::logic_error( MESSAGE ) )
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( std::invalid_argument( MESSAGE ) )
#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
BLAZE_THROW( std::length_error( MESSAGE ) )
#define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \
BLAZE_THROW( std::out_of_range( MESSAGE ) )
#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode
// In order to customize the type of exception the according macro has to be defined prior to
// including any \b Blaze header file. This will override the \b Blaze default behavior. The
// following example demonstrates this by replacing \c std::invalid_argument by a custom
// exception type:
\code
class InvalidArgument
{
public:
InvalidArgument();
explicit InvalidArgument( const std::string& message );
// ...
};
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( InvalidArgument( MESSAGE ) )
#include <blaze/Blaze.h>
\endcode
// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a
// \c std::invalid_argument exception. Note that it is recommended to define the macro such
// that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to
// special conditions and environments. However, the customization of the type of an exception
// via this macro may have an effect on the library. Thus be advised to use the macro with due
// care!
//
//
// \n \section error_reporting_special_errors Customization of Special Errors
// <hr>
//
// Last but not least it is possible to customize the error reporting for special kinds of errors.
// This can be achieved by customizing any number of the following macros:
\code
#define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
#define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
\endcode
// As explained in the previous sections, in order to customize the handling of special errors
// the according macro has to be defined prior to including any \b Blaze header file. This will
// override the \b Blaze default behavior.
//
//
// \n Previous: \ref grouping_tagging Next: \ref blas_functions \n
*/
//*************************************************************************************************
//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices
// \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements
// several convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
//
//
// \n \section blas_level_1 BLAS Level 1
// <hr>
//
// \subsection blas_level_1_dotu Dot Product (dotu)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(), \c cblas_cdotu_sub(), and
// \c cblas_zdotu_sub()):
\code
namespace blaze {
float dotu( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY );
double dotu( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY );
complex<float> dotu( blas_int_t n, const complex<float>* x, blas_int_t incX,
const complex<float>* y, blas_int_t incY );
complex<double> dotu( blas_int_t n, const complex<double>* x, blas_int_t incX,
const complex<double>* y, blas_int_t incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// complex conjugate dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(),
// \c cblas_cdotc_sub(), and \c cblas_zdotc_sub()):
\code
namespace blaze {
float dotc( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY );
double dotc( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY );
complex<float> dotc( blas_int_t n, const complex<float>* x, blas_int_t incX,
const complex<float>* y, blas_int_t incY );
complex<double> dotc( blas_int_t n, const complex<double>* x, blas_int_t incX,
const complex<double>* y, blas_int_t incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_axpy Axpy Product (axpy)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// axpy product of two dense vectors (\c cblas_saxpy(), \c cblas_daxpy(), \c cblas_caxpy(), and
// \c cblas_zaxpy()):
\code
namespace blaze {
void axpy( blas_int_t n, float alpha, const float* x, blas_int_t incX, float* y, blas_int_t incY );
void axpy( blas_int_t n, double alpha, const double* x, blas_int_t incX, double* y, blas_int_t incY );
void axpy( blas_int_t n, complex<float> alpha, const complex<float>* x,
blas_int_t incX, complex<float>* y, blas_int_t incY );
void axpy( blas_int_t n, complex<double> alpha, const complex<double>* x,
blas_int_t incX, complex<double>* y, blas_int_t incY );
template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST >
void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha );
} // namespace blaze
\endcode
// \n \section blas_level_2 BLAS Level 2
// <hr>
//
// \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/vector multiplication (\c cblas_sgemv(), \c cblas_dgemv(), \c cblas_cgemv(),
// and \c cblas_zgemv()):
\code
namespace blaze {
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n,
float alpha, const float* A, blas_int_t lda, const float* x, blas_int_t incX,
float beta, float* y, blas_int_t incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n,
double alpha, const double* A, blas_int_t lda, const double* x, blas_int_t incX,
double beta, double* y, blas_int_t incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n,
complex<float> alpha, const complex<float>* A, blas_int_t lda,
const complex<float>* x, blas_int_t incX, complex<float> beta,
complex<float>* y, blas_int_t incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n,
complex<double> alpha, const complex<double>* A, blas_int_t lda,
const complex<double>* x, blas_int_t incX, complex<double> beta,
complex<double>* y, blas_int_t incY );
} // namespace blaze
\endcode
// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c cblas_strmv(), \c cblas_dtrmv(),
// \c cblas_ctrmv(), and \c cblas_ztrmv()):
\code
namespace blaze {
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
} // namespace blaze
\endcode
// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c cblas_sgemm(), \c cblas_dgemm(), \c cblas_cgemm(),
// and \c cblas_zgemm()):
\code
namespace blaze {
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
blas_int_t m, blas_int_t n, blas_int_t k, float alpha, const float* A,
blas_int_t lda, const float* B, blas_int_t ldb, float beta, float* C,
blas_int_t ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
blas_int_t m, blas_int_t n, blas_int_t k, double alpha, const double* A,
blas_int_t lda, const double* B, blas_int_t ldb, double beta, double* C,
blas_int_t ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
blas_int_t m, blas_int_t n, blas_int_t k, complex<float> alpha,
const complex<float>* A, blas_int_t lda, const complex<float>* B,
blas_int_t ldb, complex<float> beta, complex<float>* C, blas_int_t ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
blas_int_t m, blas_int_t n, blas_int_t k, complex<double> alpha,
const complex<double>* A, blas_int_t lda, const complex<double>* B,
blas_int_t ldb, complex<double> beta, complex<double>* C, blas_int_t ldc );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c cblas_strmm(), \c cblas_dtrmm(),
// \c cblas_ctrmm(), and \c cblas_ztrmm()):
\code
namespace blaze {
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A,
blas_int_t lda, float* B, blas_int_t ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A,
blas_int_t lda, double* B, blas_int_t ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha,
const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha,
const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving
// a triangular system of equations (\c cblas_strsm(), \c cblas_dtrsm(), \c cblas_ctrsm(), and
// \c cblas_ztrsm()):
\code
namespace blaze {
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A,
blas_int_t lda, float* B, blas_int_t ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A,
blas_int_t lda, double* B, blas_int_t ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha,
const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha,
const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n Previous: \ref error_reporting_customization Next: \ref lapack_functions \n
*/
//*************************************************************************************************
//**LAPACK Functions*******************************************************************************
/*!\page lapack_functions LAPACK Functions
//
// \tableofcontents
//
//
// \n \section lapack_introction Introduction
// <hr>
//
// The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks
// (including the decomposition, inversion and the computation of the determinant of dense matrices).
// For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required
// LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper
// functions. For more details on the individual LAPACK functions see the \b Blaze function
// documentation or the LAPACK online documentation browser:
//
// http://www.netlib.org/lapack/explore-html/
//
// Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They
// provide the parameters of the original LAPACK functions and thus provide maximum flexibility:
\code
using blaze::blas_int_t;
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const blas_int_t m ( numeric_cast<blas_int_t>( A.rows() ) ); // == N
const blas_int_t n ( numeric_cast<blas_int_t>( A.columns() ) ); // == N
const blas_int_t lda ( numeric_cast<blas_int_t>( A.spacing() ) ); // >= N
const blas_int_t lwork( n*lda );
const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required
const std::unique_ptr<double[]> work( new double[N] ); // No initialization required
blas_int_t info( 0 );
getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info'
getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info'
\endcode
// In this context, \c blas_int_t is either a 32-bit or 64-bit signed integral type, depending
// on the setting of the \c BLAZE_BLAS_IS_64BIT compilation switch (see \ref blas_mode).
//
// Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These
// wrappers provide a maximum of convenience:
\code
using blaze::blas_int_t;
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required
getrf( A, ipiv.get() ); // Cannot fail
getri( A, ipiv.get() ); // Reports failure via exception
\endcode
// \note All functions only work for general, non-adapted matrices with \c float, \c double,
// \c complex<float>, or \c complex<double> element type. The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if a fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions do only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:
\code
namespace blaze {
void getrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info );
void getrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info );
void getrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info );
void getrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info );
template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, blas_int_t* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = P \cdot L \cdot U, \f]\n
// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:
\code
namespace blaze {
void sytrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info );
void sytrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info );
void sytrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void sytrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or }
A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:
\code
namespace blaze {
void hetrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void hetrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or }
A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:
\code
namespace blaze {
void potrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info );
void potrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info );
void potrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info );
void potrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info );
template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or }
A = L L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \c std::invalid_argument exception is thrown.
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:
\code
namespace blaze {
void geqrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void geqrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info );
void geqrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void geqrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot R, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c sorg2r(), \c dorg2r(), \c cungqr(), \c zungqr(), \c cung2r(), and \c zung2r(), which
// reconstruct the \c Q matrix from a QR decomposition:
\code
namespace blaze {
void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void org2r( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info );
void org2r( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info );
template< typename MT, bool SO >
void org2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info );
void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void ung2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(),
// \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from
// a QR decomposition:
\code
namespace blaze {
void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info );
void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO, typename MT2 >
void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_rq_decomposition RQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(),
// \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix:
\code
namespace blaze {
void gerqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void gerqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info );
void gerqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void gerqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = R \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and
// <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>,
// and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray
// <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case
// \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n
// upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau
// represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(),
// \c sorgr2(), \c dorgr2(), \c cungrq(), \c zungrq(), \c cungr2(), and \c zungr2(), which
// reconstruct the \c Q matrix from an RQ decomposition:
\code
namespace blaze {
void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info );
void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info );
template< typename MT, bool SO >
void orgr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info );
void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void ungr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(),
// \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from
// an RQ decomposition:
\code
namespace blaze {
void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info );
void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO, typename MT2 >
void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_ql_decomposition QL Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(),
// \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix:
\code
namespace blaze {
void geqlf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void geqlf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info );
void geqlf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void geqlf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot L, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and
// <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>,
// and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray
// A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n,
// the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower
// trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent
// the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(),
// \c sorg2l(), \c dorg2l(), \c cungql(), \c zungql(), \c cung2l(), and \c zung2l(), which
// reconstruct the \c Q matrix from a QL decomposition:
\code
namespace blaze {
void orgql( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void orgql( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void org2l( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info );
void org2l( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info );
template< typename MT, bool SO >
void org2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info );
void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void ung2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(),
// \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from
// a QL decomposition:
\code
namespace blaze {
void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info );
void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO, typename MT2 >
void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_lq_decomposition LQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(),
// \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix:
\code
namespace blaze {
void gelqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void gelqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info );
void gelqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void gelqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = L \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the
// \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n);
// the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q
// as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(),
// \c sorgl2(), \c dorgl2(), \c cunglq(), \c zunglq(), \c cungl2(), and \c zungl2(), which
// reconstruct the \c Q matrix from an LQ decomposition:
\code
namespace blaze {
void orglq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info );
void orglq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info );
void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info );
template< typename MT, bool SO >
void orgl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info );
void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void ungl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(),
// \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from
// an LQ decomposition:
\code
namespace blaze {
void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info );
void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT1, bool SO, typename MT2 >
void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \section lapack_inversion Matrix Inversion
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to invert
// the matrix in-place.
//
//
// \n \subsection lapack_lu_inversion LU-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(),
// \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by
// an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getri( blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info );
void getri( blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info );
void getri( blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void getri( blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO >
void getri( DenseMatrix<MT,SO>& A, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_inversion LDLT-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(),
// \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been
// decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytri( char uplo, blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t* info );
void sytri( char uplo, blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t* info );
void sytri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info );
void sytri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void sytri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_inversion LDLH-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c chetri() and
// \c zhetri(), which invert a Hermitian indefinite matrix that has already been decomposed by
// an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info );
void hetri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO >
void hetri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potri( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info );
void potri( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info );
void potri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info );
void potri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info );
template< typename MT, bool SO >
void potri( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
\code
namespace blaze {
void trtri( char uplo, char diag, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info );
void trtri( char uplo, char diag, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info );
void trtri( char uplo, char diag, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info );
void trtri( char uplo, char diag, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info );
template< typename MT, bool SO >
void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \c std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_substitution LU-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(),
// \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has
// already been decomposed by an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getrs( char trans, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info );
void getrs( char trans, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info );
void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems (see \ref lapack_substitution). If the function exits
// successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of
// equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_substitution LDLT-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(),
// \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite
// matrix that has already been decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info );
void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info );
void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems (see \ref lapack_substitution). If the function exits
// successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of
// equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_substitution LDLH-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c chetrs() and \c zhetrs(),
// which perform the substitution step for an Hermitian indefinite matrix that has already been
// decomposed by an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems (see \ref lapack_substitution). If the function exits
// successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of
// equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info );
void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info );
void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems (see \ref lapack_substitution). If the function exits
// successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of
// equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
\code
namespace blaze {
void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info );
void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info );
void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems (see \ref lapack_substitution). If the function exits
// successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of
// equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
\code
namespace blaze {
void gesv( blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info );
void gesv( blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info );
void gesv( blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void gesv( blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side
// the functions solve different equation systems (see \ref lapack_linear_system_solver). If
// the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
\code
namespace blaze {
void sysv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, float* work, blas_int_t lwork, blas_int_t* info );
void sysv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, double* work, blas_int_t lwork, blas_int_t* info );
void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side
// the functions solve different equation systems (see \ref lapack_linear_system_solver). If
// the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and
// \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
\code
namespace blaze {
void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info );
void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side
// the functions solve different equation systems (see \ref lapack_linear_system_solver). If
// the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
\code
namespace blaze {
void posv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info );
void posv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info );
void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info );
void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix and the given right-hand side
// the functions solve different equation systems (see \ref lapack_linear_system_solver). If
// the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \c std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the BLAS level-2 functions \c strsv(),
// \c dtrsv(), \c ctrsv(), and \c ztrsv():
\code
namespace blaze {
void trsv( char uplo, char trans, char diag, blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX );
void trsv( char uplo, char trans, char diag, blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX );
void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX );
void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX );
template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// Note that depending on the storage order of the system matrix the functions solve different
// equation systems (see \ref lapack_linear_system_solver). If the function exits successfully,
// the vector \a b contains the solution of the linear system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \c std::invalid_argument exception in case of an error. Note that
// none of the functions does perform any test for singularity or near-singularity. Such tests
// must be performed prior to calling this function!
//
//
// \n \section lapack_eigenvalues Eigenvalues/Eigenvectors
//
// \subsection lapack_eigenvalues_general General Matrices
//
// The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(),
// \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of
// the given general matrix:
\code
namespace blaze {
void geev( char jobvl, char jobvr, blas_int_t n, float* A, blas_int_t lda, float* wr, float* wi, float* VL, blas_int_t ldvl, float* VR, blas_int_t ldvr, float* work, blas_int_t lwork, blas_int_t* info );
void geev( char jobvl, char jobvr, blas_int_t n, double* A, blas_int_t lda, double* wr, double* wi, double* VL, blas_int_t ldvl, double* VR, blas_int_t ldvr, double* work, blas_int_t lwork, blas_int_t* info );
void geev( char jobvl, char jobvr, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* w, complex<float>* VL, blas_int_t ldvl, complex<float>* VR, blas_int_t ldvr, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info );
void geev( char jobvl, char jobvr, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* w, complex<double>* VL, blas_int_t ldvl, complex<double>* VR, blas_int_t ldvr, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR );
} // namespace blaze
\endcode
// The complex eigenvalues of the given matrix \a A are returned in the given vector \a w.
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs
// of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part
// first.
//
// If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR
// in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major
// matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies
\f[ A * v[j] = lambda[j] * v[j], \f]
// where \f$lambda[j]\f$ is its eigenvalue.
//
// If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL
// in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major
// matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies
\f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f]
// where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$.
//
// \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The
// functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// The first four functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices
//
// The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(),
// which compute the eigenvalues and eigenvectors of the given symmetric matrix:
\code
namespace blaze {
void syev( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* info );
void syev( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void syevd( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info );
void syevd( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix:
\code
namespace blaze {
void syevx( char jobz, char range, char uplo, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, float* Z, blas_int_t ldz, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info );
void syevx( char jobz, char range, char uplo, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, double* Z, blas_int_t ldz, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices
//
// The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(),
// which compute the eigenvalues and eigenvectors of the given Hermitian matrix:
\code
namespace blaze {
void heev( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info );
void heev( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void heevd( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t* liwork, blas_int_t* info );
void heevd( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t* liwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix:
\code
namespace blaze {
void heevx( char jobz, char range, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, complex<float>* Z, blas_int_t ldz, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info );
void heevx( char jobz, char range, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, complex<double>* Z, blas_int_t ldz, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \section lapack_singular_values Singular Values/Singular Vectors
//
// The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(),
// \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given
// general matrix:
\code
namespace blaze {
void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* info );
void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* info );
void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info );
void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). In contrast to the \c gesvd()
// functions they compute the singular value decomposition (SVD) of the given general matrix by
// applying a divide-and-conquer strategy for the computation of the left and right singular
// vectors:
\code
namespace blaze {
void gesdd( char jobz, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info );
void gesdd( char jobz, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info );
void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* info );
void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz );
} // namespace blaze
\endcode
// The resulting decomposition has the form
\f[ A = U \cdot S \cdot V, \f]
// where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal
// elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal
// matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n)
// columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively.
//
// The resulting min(\a m,\a n) real and non-negative singular values are returned in descending
// order in the vector \a s, which is resized to the correct size (if possible and necessary).
//
// Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(),
// \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or
// vectors:
\code
namespace blaze {
void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info );
void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info );
void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* info );
void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The number of singular values to be computed is specified by the lower bound \a low and the
// upper bound \a upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// The first four functions report failure via the \c info argument, the remaining functions throw
// an exception in case of an error.
//
//
// \n Previous: \ref blas_functions Next: \ref block_vectors_and_matrices \n
*/
//*************************************************************************************************
//**Block Vectors and Matrices*********************************************************************
/*!\page block_vectors_and_matrices Block Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section block_vectors_and_matrices_general General Concepts
// <hr>
//
// In addition to fundamental element types, the \b Blaze library supports vectors and matrices
// with non-fundamental element type. For instance, it is possible to define block matrices by
// using a matrix type as the element type:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A;
DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// The matrix/vector multiplication in this example runs fully parallel and uses vectorization
// for every inner matrix/vector multiplication and vector addition.
//
//
// \n \section block_vectors_and_matrices_pitfalls Pitfalls
// <hr>
//
// The only thing to keep in mind when using non-fundamental element types is that all operations
// between the elements have to be well defined. More specifically, the size of vector and matrix
// elements has to match. The attempt to combine two non-matching elements results in either a
// compilation error (in case of statically sized elements) or an exception (for dynamically sized
// elements):
\code
DynamicVector< StaticVector<int,2UL> > a;
DynamicVector< StaticVector<int,3UL> > b;
DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match
\endcode
// Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector,
// \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized
// accordingly upfront.
//
//
// \n \section block_vectors_and_matrices_examples Examples
// <hr>
//
// The first example demonstrates the multiplication between a statically sized block matrix
// and a block vector:
\code
using namespace blaze;
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ) * ( ) = ( )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>;
using V2 = StaticVector<int,2UL,columnVector>;
DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) },
{ M2x2(3), M2x2(4) } };
DynamicVector<V2,columnVector> x{ V2(1), V2(2) };
DynamicVector<V2,columnVector> y( A * x );
\endcode
// The second example shows the multiplication between a compressed block matrix with blocks of
// varying size and a compressed block vector:
\code
using namespace blaze;
// ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) )
// ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) )
// ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) )
// ( ) ( ) ( )
// ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) )
// ( ) ( ) ( )
// ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) )
// ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) )
using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>;
using V3 = HybridVector<int,3UL,columnVector>;
CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL );
A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } };
A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } };
A(1,1) = M3x3{ { 1 } };
A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } };
A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } };
CompressedVector<V3,columnVector> x( 3UL, 3UL );
x[0] = V3{ 1, 0, 1 };
x[1] = V3{ 2 };
x[2] = V3{ -1, 2 };
CompressedVector<V3,columnVector> y( A * x );
\endcode
// \n Previous: \ref lapack_functions Next: \ref intra_statement_optimization \n
*/
//*************************************************************************************************
//**Intra-Statement Optimization*******************************************************************
/*!\page intra_statement_optimization Intra-Statement Optimization
//
// One of the prime features of the \b Blaze library is the automatic intra-statement optimization.
// In order to optimize the overall performance of every single statement \b Blaze attempts to
// rearrange the operands based on their types. For instance, the following addition of dense and
// sparse vectors
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1 + d2;
\endcode
// is automatically rearranged and evaluated as
\code
// ...
d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged
\endcode
// This order of operands is highly favorable for the overall performance since the addition of
// the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized
// fashion.
//
// This intra-statement optimization can have a tremendous effect on the performance of a statement.
// Consider for instance the following computation:
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = A * B * x;
\endcode
// Since multiplications are evaluated from left to right, this statement would result in a
// matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the
// right subexpression is evaluated first, the performance can be dramatically improved since the
// matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication.
// The \b Blaze library exploits this by automatically restructuring the expression such that the
// right multiplication is evaluated first:
\code
// ...
y = A * ( B * x );
\endcode
// Note however that although this intra-statement optimization may result in a measurable or
// even significant performance improvement, this behavior may be undesirable for several reasons,
// for instance because of numerical stability. Therefore, in case the order of evaluation matters,
// the best solution is to be explicit and to separate a statement into several statements:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ...
d3 += d2; // ... and afterwards add the second dense vector
\endcode
\code
// ...
blaze::DynamicMatrix<double> A, B, C;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
C = A * B; // Compute the left-hand side matrix-matrix multiplication first ...
y = C * x; // ... before the right-hand side matrix-vector multiplication
\endcode
// Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + eval( s1 + d2 );
\endcode
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = eval( A * B ) * x;
\endcode
// \n Previous: \ref block_vectors_and_matrices Next: \ref faq \n
*/
//*************************************************************************************************
//**FAQ********************************************************************************************
/*!\page faq Frequently Asked Questions (FAQ)
//
// \tableofcontents
//
//
// <hr>
// \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug?
//
// The size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix,
// \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can indeed be larger
// than expected:
\code
StaticVector<int,3> a;
StaticMatrix<int,3,3> A;
sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12
sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36
\endcode
// In order to achieve the maximum possible performance the \b Blaze library tries to enable
// SIMD vectorization even for small vectors. For that reason \b Blaze by default uses padding
// elements for all dense vectors and matrices to guarantee that at least a single SIMD vector
// can be loaded. Depending on the used SIMD technology that can significantly increase the size
// of a \ref vector_types_static_vector, \ref matrix_types_static_matrix,
// \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix :
\code
StaticVector<int,3> a;
StaticMatrix<int,3,3> A;
sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512
// (under the assumption that an integer occupies 4 bytes)
sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512
// (under the assumption that an integer occupies 4 bytes)
\endcode
// The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:
\code
#define BLAZE_DEFAULT_PADDING_FLAG blaze::padded
\endcode
// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ...
\endcode
\code
#define BLAZE_DEFAULT_PADDING_FLAG blaze::padded
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled for
// \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix,
// and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by
// default disabled. Note however that disabling padding can considerably reduce the performance
// of all dense vector and matrix operations!
//
//
// <hr>
// \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug?
//
// Despite disabling padding via the \c BLAZE_DEFAULT_PADDING_FLAG compile time switch (see
// \ref faq_padding), the size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix,
// \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can still be larger than
// expected:
\code
#define BLAZE_DEFAULT_PADDING_FLAG blaze::unpadded
#include <blaze/Blaze.h>
StaticVector<int,3> a;
StaticVector<int,5> b;
sizeof( a ); // Always evaluates to 12
sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected)
\endcode
// The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128
// bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit).
// Since the second vector contains enough elements it is possible to benefit from vectorization.
// However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of
// 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512
// is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16
// integers, respectively. Even the second vector does not hold enough elements to benefit from
// vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte
// alignment (for AVX-512).
//
// It is possible to disable the SIMD-specific alignment for \ref vector_types_static_vector,
// \ref matrix_types_static_matrix, \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix
// via the compile time switch in the <tt>./blaze/config/Alignment.h</tt> configuration file:
\code
#define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned
\endcode
// Alternatively it is possible to set the default alignment flag via command line or by defining
// this symbol manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ...
\endcode
\code
#define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector,
// \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix
// use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned
// memory. Note however that disabling alignment can considerably reduce the performance of all
// operations with these vector and matrix types!
//
// Alternatively it is possible to disable the vectorization entirely by the compile time switch
// in the <tt>./blaze/config/Vectorization.h</tt> configuration file:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_USE_VECTORIZATION=1 ...
\endcode
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations. In case the switch is
// set to 0, vectorization is disabled entirely and the \b Blaze library chooses default,
// non-vectorized functionality for the operations. Note that deactivating the vectorization may
// pose a severe performance limitation for a large number of operations!
//
//
// <hr>
// \section faq_std_vector I experience crashes when using StaticVector/StaticMatrix in a std::vector. Is this a bug?
//
// With active vectorization the elements of a \ref vector_types_static_vector,
// \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix
// are possibly over-aligned to meet the alignment requirements of the available instruction set
// (SSE, AVX, AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float,
// \c double, ...) and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes
// for SSE, 32 bytes for AVX, and 64 bytes for AVX-512. All other types are aligned according to
// their intrinsic alignment:
\code
struct Int { int i; };
using VT1 = blaze::StaticVector<double,3UL>;
using VT2 = blaze::StaticVector<complex<float>,2UL>;
using VT3 = blaze::StaticVector<Int,5UL>;
alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512
alignof( VT3 ); // Evaluates to 'alignof( Int )'
\endcode
// For this reason \ref vector_types_static_vector, \ref vector_types_hybrid_vector,
// \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix cannot be used in
// containers using dynamic memory such as \c std::vector without additionally providing an
// allocator that can provide over-aligned memory:
\code
using Type = blaze::StaticVector<double,3UL>;
using Allocator = blaze::AlignedAllocator<Type>;
std::vector<Type> v1; // Might be misaligned for AVX or AVX-512
std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512
\endcode
// It is possible to disable the vectorization entirely by the compile time switch in the
// <tt>./blaze/config/Vectorization.h</tt> configuration file:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
g++ ... -DBLAZE_USE_VECTORIZATION=1 ...
\endcode
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations. In case the switch is
// set to 0, vectorization is disabled entirely and the \b Blaze library chooses default,
// non-vectorized functionality for the operations. Note that deactivating the vectorization may
// pose a severe performance limitation for a large number of operations!
//
//
// <hr>
// \section faq_blas To what extent does Blaze make use of BLAS functions under the hood?
//
// Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions
// for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and
// \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze
// kernels.
//
// The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether
// \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze
// does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If
// \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels
// or its own custom kernels. In case of the dense matrix multiplication this decision is based
// on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for
// small matrices it uses its own custom kernels. The threshold for this decision can be
// configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD,
// \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches
// (see <tt>./blaze/config/Thresholds.h</tt>).
//
// Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_lapack To what extent does Blaze make use of LAPACK functions under the hood?
//
// \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the
// determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see
// \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to
// use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will
// get link time errors.
//
// Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_sparse_matrix_setup What is the fastest way to setup a very large sparse matrix?
//
// The following examples give an overview of different approaches to setup a sparse, row-major NxN
// matrix with the following pattern, where all values on the diagonal and the two sub-diagonals
// are filled:
\f[\left(\begin{array}{*{9}{c}}
1 & 1 & 0 & 0 & 0 & \cdots & 0 & 0 & 0 \\
1 & 1 & 1 & 0 & 0 & \cdots & 0 & 0 & 0 \\
0 & 1 & 1 & 1 & 0 & \cdots & 0 & 0 & 0 \\
0 & 0 & 1 & 1 & 1 & \cdots & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 1 & \cdots & 0 & 0 & 0 \\
\vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\
0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & 1 \\
0 & 0 & 0 & 0 & 0 & \cdots & 0 & 1 & 1 \\
\end{array}\right)\f]
// Special emphasis is given to the runtime until the matrix setup is complete. In all cases the
// runtime is benchmarked with Clang-9.0 (compilation flags \c -O2 and \c -DNDEBUG) for \c N=200000.
//
//
// <b>Approach 1: Using the function call operator</b>
//
// In this approach the function call operator (i.e. \c operator()) is used to insert the according
// elements into the matrix:
\code
blaze::CompressedMatrix<int,rowMajor> A( N, N );
A.reserve( N*3UL-2UL ); // Optional: Reserve capacity for all elements upfront
for( size_t i=0; i<N; ++i ) {
const size_t jbegin( i == 0UL ? 0UL : i-1UL );
const size_t jend ( i == N-1UL ? N-1UL : i+1UL );
for( size_t j=jbegin; j<=jend; ++j ) {
A(i,j) = 1;
}
}
\endcode
// This approach is the most general and convenient, but also the slowest of all (approx. \b 64
// seconds). With every call to \c operator(), a new element is inserted at the specified position.
// This implies shifting all subsequent elements and adapting every subsequent row. Since all
// non-zero elements are stored in a single array inside a \c CompressedMatrix, this approach is
// similar to inserting elements at the front of a \c std::vector; all subsequent elements have
// to be shifted.
//
//
// <b>Approach 2: Rowwise reserve and insert</b>
//
// The next approach performs a rowwise reservation of capacity:
\code
blaze::CompressedMatrix<int,rowMajor> A( N, N );
A.reserve( N*3UL ); // Allocate the total amount of memory
A.reserve( 0UL, 2UL ); // Reserve a capacity of 2 for row 0
for( size_t i=1; i<N-1UL; ++i ) {
A.reserve( i, 3UL ); // Reserve a capacity of 3 for row i
}
A.reserve( N-1UL, 2UL ); // Reserve a capacity of 2 for the last row
for( size_t i=0; i<N; ++i ) {
const size_t jbegin( i == 0UL ? 0UL : i-1UL );
const size_t jend ( i == N-1UL ? N-1UL : i+1UL );
for( size_t j=jbegin; j<=jend; ++j ) {
A.insert( i, j, 1 );
}
}
\endcode
// The first call to reserve() performs the memory allocation for the entire matrix. The complete
// matrix now holds the entire capacity, but each single row has a capacity of 0. Therefore the
// subsequent calls to \c reserve() distribute the existing capacity among all rows.
//
// Unfortunately, also this approach is rather slow. The runtime is approx. \b 30 seconds. The
// downside of this approach is that changing the capacity of a single row causes a change in
// all following rows. Therefore this approach is similar to the first approach.
//
//
// <b>Approach 3: reserve/append/finalize</b>
//
// As the wiki explains, the most efficient way to fill a sparse matrix is a combination of
// \c reserve(), \c append() and \c finalize():
\code
CompressedMatrix<int,rowMajor> A( N, N );
A.reserve( N*3UL );
for( size_t i=0; i<N; ++i ) {
const size_t jbegin( i == 0UL ? 0UL : i-1UL );
const size_t jend ( i == N-1UL ? N-1UL : i+1UL );
for( size_t j=jbegin; j<=jend; ++j ) {
A.append( i, j, 1 );
}
A.finalize( i );
}
\endcode
// The initial call to \c reserve() allocates enough memory for all non-zero elements of the
// entire matrix. \c append() and \c finalize() are then used to insert the elements and to mark
// the end of each single row. This is a very low-level approach and very similar to writing to
// an array manually, which results in a mere \b 0.026 seconds. The \c append() function writes
// the new element to the next memory location, and at the end of each row or column the
// \c finalize() function sets the internal pointers accordingly. It is very important to note
// that the \c finalize() function has to be explicitly called for each row, even for empty ones!
// Else the internal data structure will be corrupt! Also note that although \c append() does not
// allocate new memory, it still invalidates all iterators returned by the \c end() functions!
//
//
// <b>Approach 4: Reservation via the constructor</b>
//
// In case the number of non-zero elements is known upfront, it is also possible to perform the
// reservation via the constructor of \c CompressedMatrix. For that purpose \c CompressedMatrix
// provides a constructor taking a \c std::vector<size_t>:
\code
std::vector<size_t> nonzeros( N, 3UL ); // Create a vector of N elements with value 3
nonzeros[ 0] = 2UL; // We need only 2 elements in the first row ...
nonzeros[N-1] = 2UL; // ... and last row.
CompressedMatrix<int,rowMajor> A( N, N, nonzeros );
//std::cerr << " Inserting values...\n";
for( size_t i=0; i<N; ++i ) {
const size_t jbegin( i == 0UL ? 0UL : i-1UL );
const size_t jend ( i == N-1UL ? N-1UL : i+1UL );
for( size_t j=jbegin; j<=jend; ++j ) {
A.insert( i, j, 1 );
}
}
\endcode
// The runtime for this approach is \b 0.027 seconds.
//
//
// <hr>
// \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it?
//
// The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze
// library, which by now is several hundred thousand lines of source code. That means that a lot
// of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it
// is rare that everything is required within a single compilation unit. Therefore it is easily
// possible to reduce compile times by including only those \b Blaze features that are used within
// the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be
// enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation
// times by about 20%.
//
// Additionally we are taking care to implement new \b Blaze functionality such that compile times
// do not explode and try to reduce the compile times of existing features. Thus newer releases of
// \b Blaze can also improve compile times.
//
//
// <hr>
// \section faq_custom_operations Blaze does not provide feature XYZ. What can I do?
//
// In some cases you might be able to implement the required functionality very conveniently by
// building on the existing \c map() functions (see \ref custom_operations_map). For instance,
// the following code demonstrates the addition of a function that merges two vectors of floating
// point type into a vector of complex numbers:
\code
template< typename VT1, typename VT2, bool TF >
decltype(auto) zip( const blaze::DenseVector<VT1,TF>& lhs, const blaze::DenseVector<VT2,TF>& rhs )
{
return blaze::map( ~lhs, ~rhs, []( const auto& r, const auto& i ) {
using ET1 = ElementType_t<VT1>;
using ET2 = ElementType_t<VT2>;
return std::complex<std::common_type_t<ET1,ET2>>( r, i );
} );
}
\endcode
// You will find a summary of the necessary steps to create custom features in \ref customization.
//
// Sometimes, however, the available customization points might not be sufficient. In this case
// you are cordially invited to create a pull request that provides the implementation of a
// feature or to create an issue according to our \ref issue_creation_guidelines. Please try
// to explain the feature as descriptive as possible, for instance by providing conceptual code
// examples.
//
// \n Previous: \ref intra_statement_optimization Next: \ref issue_creation_guidelines \n
*/
//*************************************************************************************************
//**Issue Creation Guidelines*********************************************************************
/*!\page issue_creation_guidelines Issue Creation Guidelines
//
// \tableofcontents
//
//
// One of the most important aspects of the \b Blaze project is the
// <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official
// \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests
// and bug reports, as we believe that this is a significant part of making \b Blaze a better
// library. However, we are asking to follow a small set of guidelines when creating an issue
// to facilitate the issue management on our side and also to make issues more useful for users
// of \b Blaze.
//
//
// <hr>
// \section issues_title Title
//
// The title is the most important detail of an issue. A well chosen title makes it easy to grasp
// the idea of an issue and improves the discoverability. Therefore, please choose a title that
// is ...
//
// - ... as descriptive as possible;
// - ... as concise as possible;
// - ... as unambiguous as possible.
//
// Also, please create a separate issue for each idea/problem/etc. A very general title or an
// \"and\" in the title could be an indication that the issue is not specific enough and should
// be split into several issues.
//
// \subsection issues_title_good_examples Good Examples
//
// - \"Provide support for AVX-512 SIMD operations\"
// - \"Add support for the Boost Multiprecision Library\"
// - \"Introduce reduction operations into Blaze\"
// - \"Compilation error on KNL with -march=knl\"
//
// \subsection issues_title_bad_examples Bad Examples
//
// - \"Several requests\" (instead create separate issues for each single request)
// - \"Improve the performance\" (instead specify which operation should perform better)
// - \"Blaze library compilation error\" (instead try to be more specific)
//
//
// <hr>
// \section issues_description Description
//
// The description should help us to understand your idea or problem in as much detail as possible.
// Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how
// the behavior should be, etc.). Please spend a couple of minutes to try to make the description
// as comprehensive as possible.
//
//
// <hr>
// \section issues_assignee Assignee
//
// There is no need to assign the issue to a particular person. It is perfectly ok if you just
// ignore this setting.
//
//
// <hr>
// \section issues_kind Kind of Issue
//
// There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug,
// \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the
// following we try to give guidelines on which kind to choose for a particular issue:
//
// \subsection issues_kind_bug Bug
//
// Please choose the category \ref issues_kind_bug if ...
//
// - ... you experience a compilation error despite your best efforts to get it right;
// - ... you experience a crash/failure despite your best efforts to get it right;
// - ... you experience problems when combining features;
// - ... a feature does not work as specified/documented (i.e. can be considered broken).
//
// Please \b don't choose the category \ref issues_kind_bug if ...
//
// - ... you feel a feature should work differently than it currently does (instead create a
// \ref issues_kind_proposal with a convincing title and description);
// - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement
// issue to extend the documentation);
// - ... you are missing a feature (instead create a \ref issues_kind_proposal or
// \ref issues_kind_enhancement issue).
//
// If you select the category \ref issues_kind_bug, please also try to provide a minimum example
// that fails. That helps us to minimize the time to resolve the bug.
//
// As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will
// also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of
// the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement
// or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze.
//
// \subsection issues_kind_enhancement Enhancement
//
// Please choose the category \ref issues_kind_enhancement if ...
//
// - ... you need an add-on to an existing feature;
// - ... you need an extension of an existing feature;
// - ... you need an extended documentation for an existing feature.
//
// \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind
// if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa.
// Just make sure you don't request an extension or new feature as a \ref issues_kind_bug.
//
// \subsection issues_kind_proposal Proposal
//
// Please choose the category \ref issues_kind_proposal if ...
//
// - ... you want to request a new feature;
// - ... you want to change an existing feature.
//
// \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if
// a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. Just
// make sure you don't request an extension or new feature as a \ref issues_kind_bug.
//
// \subsection issues_kind_task Task
//
// Please choose the category \ref issues_kind_task if ...
//
// - ... you want us to do something not feature related;
// - ... you have something else in mind which does not fall in the other three categories.
//
//
// <hr>
// \section issues_priority Priority
//
// Via the priority of an issue you can tell us how important the issue is to you. Therefore the
// priority can have an influence on when we will deal with the issue. However, unfortunately we
// don't have an infinite amount of time and we can not deal with an arbitrary amount of issues
// at the same time. We will therefore take the priority into account, but mainly schedule the
// issues based on impact to all \b Blaze users and the estimated time to resolve it.
//
// You can choose between \ref issues_priority_blocker, \ref issues_priority_critical,
// \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial.
//
// \subsection issues_priority_blocker Blocker
//
// Please choose a \ref issues_priority_blocker priority if ...
//
// - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug;
// - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users.
//
// Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal
// should never be a \ref issues_priority_blocker!
//
// \subsection issues_priority_critical Critical
//
// Please choose a \ref issues_priority_critical priority if ...
//
// - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful;
// - ... you cannot use \b Blaze without the proposed feature;
// - ... you consider it to be essential for \b all \b Blaze users.
//
// \subsection issues_priority_major Major
//
// Please choose a \ref issues_priority_major priority if ...
//
// - ... a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but
// still very important to you;
// - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users.
//
// The \ref issues_priority_major category is the default setting in Bitbucket and we therefore
// consider it as the default priority for issues.
//
// \subsection issues_priority_minor Minor
//
// Please choose a \ref issues_priority_minor priority if ...
//
// - ... a \ref issues_kind_bug does not affect many \b Blaze users;
// - ... a feature request would only be useful for a small number of \b Blaze users;
// - ... a feature would be nice to have, but is not particularly important.
//
// \subsection issues_priority_trivial Trivial
//
// Please choose a \ref issues_priority_trivial priority if ...
//
// - ... a \ref issues_kind_bug hardly affects anyone;
// - ... a feature request would only be useful for very few \b Blaze users;
// - ... the expected time to resolve an issue is very small.
//
//
// <hr>
// \section issues_attachment Attachments
//
// You can always provide us with additional information in the form of attachments. Feel free
// to attach something to the issue if ...
//
// - ... it can help us to analyze a \ref issues_kind_bug;
// - ... you have some source code that demonstrates a problem;
// - ... you already have a working prototype that sketches the idea;
// - ... you have additional resources that could help us.
//
// We appreciate anything that simplifies our work and speeds up our progress.
//
// \n Previous: \ref faq Next: \ref blaze_references \n
*/
//*************************************************************************************************
//**Blaze References*******************************************************************************
/*!\page blaze_references Blaze References
//
// In case you need references to the \b Blaze library (for papers or other publications), please
// feel free to use one of the following references:
\code
@misc{blazelib,
author = "Klaus {Iglberger}",
title = "Blaze C++ Linear Algebra Library",
howpublished = "https://bitbucket.org/blaze-lib",
year = 2012
}
\endcode
\code
@article{iglberger2012_1,
author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies",
journal = "SIAM Journal on Scientific Computing",
year = 2012,
volume = 34(2),
pages = C42--C69
}
\endcode
\code
@inproceedings{iglberger2012_2,
author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
title = "High Performance Smart Expression Template Math Libraries",
booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012",
year = 2012
}
\endcode
// \n Previous: \ref issue_creation_guidelines
*/
//*************************************************************************************************
#endif
|
conv_dw_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "conv_dw_kernel_x86.h"
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
/* Scalar max/min helper macros.
 * NOTE: both arguments are evaluated twice — do not pass expressions
 * with side effects (e.g. max(x++, y)). */
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Apply ReLU in place over `size` floats.
 * If `activation` > 0, the output is additionally clamped to
 * [0, activation] (ReLU-N, e.g. activation == 6 gives ReLU6).
 *
 * Fix: the `activation > 0` test is loop-invariant, so it is hoisted out
 * of the per-element loop. Explicit comparisons also avoid the
 * double-evaluation hazard of the max()/min() macros. */
void relu(float* data, int size, int activation)
{
    const int has_cap = activation > 0;      /* clamp ceiling enabled? */
    const float cap = ( float )activation;

    for (int i = 0; i < size; i++)
    {
        float v = data[i] > 0.f ? data[i] : 0.f;
        if (has_cap && v > cap)
        {
            v = cap;
        }
        data[i] = v;
    }
}
/* Copy an in_h x in_w image into an out_h x out_w buffer, placing it at
 * offset (top, left) and filling every border cell with the value `v`.
 * The caller guarantees out_h >= top + in_h and out_w >= left + in_w. */
void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    const float* src = input;
    float* dst = output;
    int row = 0;

    /* rows above the image: pure padding */
    while (row < top)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }

    /* rows containing image data: left pad, image row, right pad */
    while (row < top + in_h)
    {
        int col = 0;
        while (col < left)
        {
            dst[col] = v;
            col++;
        }
        if (in_w < 12)
        {
            /* narrow rows: a scalar copy is cheap enough */
            while (col < left + in_w)
            {
                dst[col] = src[col - left];
                col++;
            }
        }
        else
        {
            /* wide rows: bulk copy */
            memcpy(dst + left, src, in_w * sizeof(float));
            col += in_w;
        }
        while (col < out_w)
        {
            dst[col] = v;
            col++;
        }
        src += in_w;
        dst += out_w;
        row++;
    }

    /* rows below the image: pure padding */
    while (row < out_h)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }
}
#if __AVX__
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 3;
int channel_remain = inc - (channel_count << 3);
// generate the image tmp
float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 8;
const float* k0 = img_data + (ii + 0) * inwh;
const float* k1 = img_data + (ii + 1) * inwh;
const float* k2 = img_data + (ii + 2) * inwh;
const float* k3 = img_data + (ii + 3) * inwh;
const float* k4 = img_data + (ii + 4) * inwh;
const float* k5 = img_data + (ii + 5) * inwh;
const float* k6 = img_data + (ii + 6) * inwh;
const float* k7 = img_data + (ii + 7) * inwh;
const float* f0 = kernel_data + (ii + 0) * 9;
const float* f1 = kernel_data + (ii + 1) * 9;
const float* f2 = kernel_data + (ii + 2) * 9;
const float* f3 = kernel_data + (ii + 3) * 9;
const float* f4 = kernel_data + (ii + 4) * 9;
const float* f5 = kernel_data + (ii + 5) * 9;
const float* f6 = kernel_data + (ii + 6) * 9;
const float* f7 = kernel_data + (ii + 7) * 9;
const float* b0 = bias_data + (ii + 0);
const float* b1 = bias_data + (ii + 1);
const float* b2 = bias_data + (ii + 2);
const float* b3 = bias_data + (ii + 3);
const float* b4 = bias_data + (ii + 4);
const float* b5 = bias_data + (ii + 5);
const float* b6 = bias_data + (ii + 6);
const float* b7 = bias_data + (ii + 7);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0[4] = k4[0];
tmp0[5] = k5[0];
tmp0[6] = k6[0];
tmp0[7] = k7[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
k4++;
k5++;
k6++;
k7++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1[4] = f4[0];
tmp1[5] = f5[0];
tmp1[6] = f6[0];
tmp1[7] = f7[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
f4++;
f5++;
f6++;
f7++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
tmp2[4] = b4[0];
tmp2[5] = b5[0];
tmp2[6] = b6[0];
tmp2[7] = b7[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
tmp2[4] = 0;
tmp2[5] = 0;
tmp2[6] = 0;
tmp2[7] = 0;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + channel_count * 8;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 8;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 8;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 8 * 9;
float* btmp = bias_tmp + c * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw;
float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw;
float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw;
float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
for (; j + 7 < outw; j += 8)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _sum4 = _mm256_loadu_ps(btmp);
__m256 _sum5 = _mm256_loadu_ps(btmp);
__m256 _sum6 = _mm256_loadu_ps(btmp);
__m256 _sum7 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
__m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
__m256 _va9 = _mm256_loadu_ps(itmp0 + 72);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_va6 = _mm256_loadu_ps(itmp1 + 48);
_va7 = _mm256_loadu_ps(itmp1 + 56);
_va8 = _mm256_loadu_ps(itmp1 + 64);
_va9 = _mm256_loadu_ps(itmp1 + 72);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_va6 = _mm256_loadu_ps(itmp2 + 48);
_va7 = _mm256_loadu_ps(itmp2 + 56);
_va8 = _mm256_loadu_ps(itmp2 + 64);
_va9 = _mm256_loadu_ps(itmp2 + 72);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
_mm256_storeu_ps(otmp + 32, _sum4);
_mm256_storeu_ps(otmp + 40, _sum5);
_mm256_storeu_ps(otmp + 48, _sum6);
_mm256_storeu_ps(otmp + 56, _sum7);
itmp0 += 64;
itmp1 += 64;
itmp2 += 64;
otmp += 64;
}
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_mm256_storeu_ps(otmp, _sum0);
itmp0 += 8;
itmp1 += 8;
itmp2 += 8;
otmp += 8;
}
}
}
// load_data
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 8 * outwh;
float* tmp0 = output + i * 8 * outwh;
float* tmp1 = output + i * 8 * outwh + 1 * outwh;
float* tmp2 = output + i * 8 * outwh + 2 * outwh;
float* tmp3 = output + i * 8 * outwh + 3 * outwh;
float* tmp4 = output + i * 8 * outwh + 4 * outwh;
float* tmp5 = output + i * 8 * outwh + 5 * outwh;
float* tmp6 = output + i * 8 * outwh + 6 * outwh;
float* tmp7 = output + i * 8 * outwh + 7 * outwh;
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
tmp4[0] = otmp[4];
tmp5[0] = otmp[5];
tmp6[0] = otmp[6];
tmp7[0] = otmp[7];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
tmp4++;
tmp5++;
tmp6++;
tmp7++;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + ii * outwh;
float* tmp0 = output + ii * outwh;
float* tmp1 = output + ii * outwh + 1 * outwh;
float* tmp2 = output + ii * outwh + 2 * outwh;
float* tmp3 = output + ii * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + channel_count * 8 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 8;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
// Depthwise 3x3 convolution, stride 2 (AVX/FMA path).
//
// Strategy:
//   1) Repack the planar (one plane per channel) input, kernel and bias into
//      interleaved groups of 8 channels, so each __m256 lane carries one
//      channel and a single FMA processes 8 channels of one spatial tap.
//   2) Run the 3x3/stride-2 convolution on the packed data, unrolling
//      4 / 2 / 1 output columns per inner iteration.
//   3) Unpack the interleaved result back into planar output.
//
// Parameters:
//   output      - planar output, inc planes of outh*outw floats
//   img_data    - planar input, inc planes of inh*inw floats
//   kernel_data - 9 weights per channel (inc * 9 floats)
//   bias_data   - one bias per channel; may be NULL (bias treated as zero)
//   inc         - channel count (depthwise: input channels == output channels)
//   inh, inw    - input plane height / width
//   outh, outw  - output plane height / width
//   num_thread  - currently unused by this implementation
//
// NOTE(review): assumes the input is already padded so that every 3x3 window
// at stride 2 is in-bounds (roughly inh >= 2*outh + 1, inw >= 2*outw + 1) --
// confirm against the caller.
// NOTE(review): when bias_data == NULL the code still forms bias_data + ii
// before checking for NULL; that pointer arithmetic is technically UB, though
// the resulting pointers are never dereferenced in the NULL case.
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 3;                    // number of full 8-channel groups
int channel_remain = inc - (channel_count << 3); // leftover channels (0..7)
// generate the image tmp: pack img/kernel/bias into 8-wide interleaved
// buffers. One extra group is always allocated so the remainder channels
// have a slot; its unused lanes stay whatever sys_malloc returned, but
// those lanes are never read back during unpacking.
float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
{
// Full groups: interleave 8 consecutive channel planes element-by-element
// (planar NCHW -> packed C/8-H-W-8).
for (int i = 0; i < channel_count; i++)
{
int ii = i * 8;
const float* k0 = img_data + (ii + 0) * inwh;
const float* k1 = img_data + (ii + 1) * inwh;
const float* k2 = img_data + (ii + 2) * inwh;
const float* k3 = img_data + (ii + 3) * inwh;
const float* k4 = img_data + (ii + 4) * inwh;
const float* k5 = img_data + (ii + 5) * inwh;
const float* k6 = img_data + (ii + 6) * inwh;
const float* k7 = img_data + (ii + 7) * inwh;
const float* f0 = kernel_data + (ii + 0) * 9;
const float* f1 = kernel_data + (ii + 1) * 9;
const float* f2 = kernel_data + (ii + 2) * 9;
const float* f3 = kernel_data + (ii + 3) * 9;
const float* f4 = kernel_data + (ii + 4) * 9;
const float* f5 = kernel_data + (ii + 5) * 9;
const float* f6 = kernel_data + (ii + 6) * 9;
const float* f7 = kernel_data + (ii + 7) * 9;
const float* b0 = bias_data + (ii + 0);
const float* b1 = bias_data + (ii + 1);
const float* b2 = bias_data + (ii + 2);
const float* b3 = bias_data + (ii + 3);
const float* b4 = bias_data + (ii + 4);
const float* b5 = bias_data + (ii + 5);
const float* b6 = bias_data + (ii + 6);
const float* b7 = bias_data + (ii + 7);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
// Interleave one pixel from each of the 8 planes per iteration.
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0[4] = k4[0];
tmp0[5] = k5[0];
tmp0[6] = k6[0];
tmp0[7] = k7[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
k4++;
k5++;
k6++;
k7++;
}
// Interleave the 9 kernel taps the same way (tap-major, 8 channels per tap).
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1[4] = f4[0];
tmp1[5] = f5[0];
tmp1[6] = f6[0];
tmp1[7] = f7[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
f4++;
f5++;
f6++;
f7++;
}
// Bias: copy the 8 per-channel values, or zero-fill when no bias given.
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
tmp2[4] = b4[0];
tmp2[5] = b5[0];
tmp2[6] = b6[0];
tmp2[7] = b7[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
tmp2[4] = 0;
tmp2[5] = 0;
tmp2[6] = 0;
tmp2[7] = 0;
}
}
// Remainder channels (< 8) go into the extra packed group.
int i = 0;
// 4-at-a-time remainder packing. Since channel_remain < 8, this loop body
// runs at most once, with i == 0 -- which is why writing fixed lanes
// tmp0[0..3] / tmp1[0..3] below is correct (lane index == i + 0..3).
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + ii; // ii == channel_count*8 here (i == 0)
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
// Scalar remainder packing: channel i lands in SIMD lane i of the extra group.
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + channel_count * 8;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 8;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 8;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
// Compute phase: convolve each packed 8-channel group (including the
// remainder group, hence channel_count + 1).
float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 8 * 9; // 9 taps x 8 channels for this group
float* btmp = bias_tmp + c * 8;       // 8 bias values for this group
for (int i = 0; i < outh; i++)
{
int j = 0;
// Three packed input rows feeding output row i: rows 2i, 2i+1, 2i+2
// (stride 2 vertically); each pixel is 8 floats wide.
float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw;
float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw;
float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw;
float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
// 4 output columns per iteration: consumes 9 packed input pixels
// (stride 2 horizontally), advances input by 8 pixels (64 floats).
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
// Top kernel row: output column n uses input pixels 2n, 2n+1, 2n+2.
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
__m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
// Middle kernel row (taps 3..5 at ktmp + 24/32/40).
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_va6 = _mm256_loadu_ps(itmp1 + 48);
_va7 = _mm256_loadu_ps(itmp1 + 56);
_va8 = _mm256_loadu_ps(itmp1 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
// Bottom kernel row (taps 6..8 at ktmp + 48/56/64).
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_va6 = _mm256_loadu_ps(itmp2 + 48);
_va7 = _mm256_loadu_ps(itmp2 + 56);
_va8 = _mm256_loadu_ps(itmp2 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
itmp0 += 64; // input advances 8 packed pixels (4 outputs * stride 2)
itmp1 += 64;
itmp2 += 64;
otmp += 32;  // output advances 4 packed pixels
}
// 2 output columns per iteration (5 input pixels per row).
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 16;
}
// Single output column per iteration (3 input pixels per row).
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_mm256_storeu_ps(otmp, _sum0);
itmp0 += 16; // stride 2: skip 2 packed pixels per output column
itmp1 += 16;
itmp2 += 16;
otmp += 8;
}
}
}
// load_data: unpack the interleaved result back to planar output.
{
// Full groups: de-interleave 8 channels from each packed pixel.
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 8 * outwh;
float* tmp0 = output + i * 8 * outwh;
float* tmp1 = output + i * 8 * outwh + 1 * outwh;
float* tmp2 = output + i * 8 * outwh + 2 * outwh;
float* tmp3 = output + i * 8 * outwh + 3 * outwh;
float* tmp4 = output + i * 8 * outwh + 4 * outwh;
float* tmp5 = output + i * 8 * outwh + 5 * outwh;
float* tmp6 = output + i * 8 * outwh + 6 * outwh;
float* tmp7 = output + i * 8 * outwh + 7 * outwh;
// NOTE(review): inner loop variable shadows the outer `i`; harmless
// here but worth renaming at some point.
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
tmp4[0] = otmp[4];
tmp5[0] = otmp[5];
tmp6[0] = otmp[6];
tmp7[0] = otmp[7];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
tmp4++;
tmp5++;
tmp6++;
tmp7++;
}
}
int i = 0;
// 4-wide remainder unpack. Runs at most once with i == 0 (channel_remain
// < 8), so `output_tmp + ii * outwh` equals the remainder group's base
// `output_tmp + channel_count * 8 * outwh` and lanes 0..3 are correct.
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + ii * outwh;
float* tmp0 = output + ii * outwh;
float* tmp1 = output + ii * outwh + 1 * outwh;
float* tmp2 = output + ii * outwh + 2 * outwh;
float* tmp3 = output + ii * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
// Scalar remainder unpack: lane i of the remainder group -> plane ii.
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + channel_count * 8 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 8;
tmp0++;
}
}
}
// Release all scratch buffers.
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
#elif __SSE2__
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 2;
int channel_remain = inc - (channel_count << 2);
// generate the image tmp
float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 4;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 4;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 4;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 4 * inwh;
float* tmp1 = kernel_tmp + channel_count * 4 * 9;
float* tmp2 = bias_tmp + channel_count * 4;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 4;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 4;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 4 * 9;
float* btmp = bias_tmp + c * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw;
float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw;
float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw;
float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
for (; j + 7 < outw; j += 8)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _sum4 = _mm_loadu_ps(btmp);
__m128 _sum5 = _mm_loadu_ps(btmp);
__m128 _sum6 = _mm_loadu_ps(btmp);
__m128 _sum7 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _va6 = _mm_loadu_ps(itmp0 + 24);
__m128 _va7 = _mm_loadu_ps(itmp0 + 28);
__m128 _va8 = _mm_loadu_ps(itmp0 + 32);
__m128 _va9 = _mm_loadu_ps(itmp0 + 36);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_va6 = _mm_loadu_ps(itmp1 + 24);
_va7 = _mm_loadu_ps(itmp1 + 28);
_va8 = _mm_loadu_ps(itmp1 + 32);
_va9 = _mm_loadu_ps(itmp1 + 36);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_va6 = _mm_loadu_ps(itmp2 + 24);
_va7 = _mm_loadu_ps(itmp2 + 28);
_va8 = _mm_loadu_ps(itmp2 + 32);
_va9 = _mm_loadu_ps(itmp2 + 36);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
_mm_storeu_ps(otmp + 16, _sum4);
_mm_storeu_ps(otmp + 20, _sum5);
_mm_storeu_ps(otmp + 24, _sum6);
_mm_storeu_ps(otmp + 28, _sum7);
#else
float sum0[4] = {btmp[0]};
float sum1[4] = {btmp[0]};
float sum2[4] = {btmp[0]};
float sum3[4] = {btmp[0]};
float sum4[4] = {btmp[0]};
float sum5[4] = {btmp[0]};
float sum6[4] = {btmp[0]};
float sum7[4] = {btmp[0]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 4] * ktmp[k];
sum1[k] += itmp1[k + 4] * ktmp[k + 12];
sum1[k] += itmp2[k + 4] * ktmp[k + 24];
sum1[k] += itmp0[k + 8] * ktmp[k + 4];
sum1[k] += itmp1[k + 8] * ktmp[k + 16];
sum1[k] += itmp2[k + 8] * ktmp[k + 28];
sum1[k] += itmp0[k + 12] * ktmp[k + 8];
sum1[k] += itmp1[k + 12] * ktmp[k + 20];
sum1[k] += itmp2[k + 12] * ktmp[k + 32];
sum2[k] += itmp0[k + 8] * ktmp[k];
sum2[k] += itmp1[k + 8] * ktmp[k + 12];
sum2[k] += itmp2[k + 8] * ktmp[k + 24];
sum2[k] += itmp0[k + 12] * ktmp[k + 4];
sum2[k] += itmp1[k + 12] * ktmp[k + 16];
sum2[k] += itmp2[k + 12] * ktmp[k + 28];
sum2[k] += itmp0[k + 16] * ktmp[k + 8];
sum2[k] += itmp1[k + 16] * ktmp[k + 20];
sum2[k] += itmp2[k + 16] * ktmp[k + 32];
sum3[k] += itmp0[k + 12] * ktmp[k];
sum3[k] += itmp1[k + 12] * ktmp[k + 12];
sum3[k] += itmp2[k + 12] * ktmp[k + 24];
sum3[k] += itmp0[k + 16] * ktmp[k + 4];
sum3[k] += itmp1[k + 16] * ktmp[k + 16];
sum3[k] += itmp2[k + 16] * ktmp[k + 28];
sum3[k] += itmp0[k + 20] * ktmp[k + 8];
sum3[k] += itmp1[k + 20] * ktmp[k + 20];
sum3[k] += itmp2[k + 20] * ktmp[k + 32];
sum4[k] += itmp0[k + 16] * ktmp[k];
sum4[k] += itmp1[k + 16] * ktmp[k + 12];
sum4[k] += itmp2[k + 16] * ktmp[k + 24];
sum4[k] += itmp0[k + 20] * ktmp[k + 4];
sum4[k] += itmp1[k + 20] * ktmp[k + 16];
sum4[k] += itmp2[k + 20] * ktmp[k + 28];
sum4[k] += itmp0[k + 24] * ktmp[k + 8];
sum4[k] += itmp1[k + 24] * ktmp[k + 20];
sum4[k] += itmp2[k + 24] * ktmp[k + 32];
sum5[k] += itmp0[k + 20] * ktmp[k];
sum5[k] += itmp1[k + 20] * ktmp[k + 12];
sum5[k] += itmp2[k + 20] * ktmp[k + 24];
sum5[k] += itmp0[k + 24] * ktmp[k + 4];
sum5[k] += itmp1[k + 24] * ktmp[k + 16];
sum5[k] += itmp2[k + 24] * ktmp[k + 28];
sum5[k] += itmp0[k + 28] * ktmp[k + 8];
sum5[k] += itmp1[k + 28] * ktmp[k + 20];
sum5[k] += itmp2[k + 28] * ktmp[k + 32];
sum6[k] += itmp0[k + 24] * ktmp[k];
sum6[k] += itmp1[k + 24] * ktmp[k + 12];
sum6[k] += itmp2[k + 24] * ktmp[k + 24];
sum6[k] += itmp0[k + 28] * ktmp[k + 4];
sum6[k] += itmp1[k + 28] * ktmp[k + 16];
sum6[k] += itmp2[k + 28] * ktmp[k + 28];
sum6[k] += itmp0[k + 32] * ktmp[k + 8];
sum6[k] += itmp1[k + 32] * ktmp[k + 20];
sum6[k] += itmp2[k + 32] * ktmp[k + 32];
sum7[k] += itmp0[k + 28] * ktmp[k];
sum7[k] += itmp1[k + 28] * ktmp[k + 12];
sum7[k] += itmp2[k + 28] * ktmp[k + 24];
sum7[k] += itmp0[k + 32] * ktmp[k + 4];
sum7[k] += itmp1[k + 32] * ktmp[k + 16];
sum7[k] += itmp2[k + 32] * ktmp[k + 28];
sum7[k] += itmp0[k + 36] * ktmp[k + 8];
sum7[k] += itmp1[k + 36] * ktmp[k + 20];
sum7[k] += itmp2[k + 36] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
otmp[k + 16] = sum4[k];
otmp[k + 20] = sum5[k];
otmp[k + 24] = sum6[k];
otmp[k + 28] = sum7[k];
}
#endif
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 32;
}
for (; j + 3 < outw; j += 4)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
#else
float sum0[4] = {btmp[0]};
float sum1[4] = {btmp[0]};
float sum2[4] = {btmp[0]};
float sum3[4] = {btmp[0]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 4] * ktmp[k];
sum1[k] += itmp1[k + 4] * ktmp[k + 12];
sum1[k] += itmp2[k + 4] * ktmp[k + 24];
sum1[k] += itmp0[k + 8] * ktmp[k + 4];
sum1[k] += itmp1[k + 8] * ktmp[k + 16];
sum1[k] += itmp2[k + 8] * ktmp[k + 28];
sum1[k] += itmp0[k + 12] * ktmp[k + 8];
sum1[k] += itmp1[k + 12] * ktmp[k + 20];
sum1[k] += itmp2[k + 12] * ktmp[k + 32];
sum2[k] += itmp0[k + 8] * ktmp[k];
sum2[k] += itmp1[k + 8] * ktmp[k + 12];
sum2[k] += itmp2[k + 8] * ktmp[k + 24];
sum2[k] += itmp0[k + 12] * ktmp[k + 4];
sum2[k] += itmp1[k + 12] * ktmp[k + 16];
sum2[k] += itmp2[k + 12] * ktmp[k + 28];
sum2[k] += itmp0[k + 16] * ktmp[k + 8];
sum2[k] += itmp1[k + 16] * ktmp[k + 20];
sum2[k] += itmp2[k + 16] * ktmp[k + 32];
sum3[k] += itmp0[k + 12] * ktmp[k];
sum3[k] += itmp1[k + 12] * ktmp[k + 12];
sum3[k] += itmp2[k + 12] * ktmp[k + 24];
sum3[k] += itmp0[k + 16] * ktmp[k + 4];
sum3[k] += itmp1[k + 16] * ktmp[k + 16];
sum3[k] += itmp2[k + 16] * ktmp[k + 28];
sum3[k] += itmp0[k + 20] * ktmp[k + 8];
sum3[k] += itmp1[k + 20] * ktmp[k + 20];
sum3[k] += itmp2[k + 20] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
}
#endif
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 16;
}
for (; j < outw; j++)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_mm_storeu_ps(otmp, _sum0);
#else
float sum0[4] = {btmp[0]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
}
#endif
itmp0 += 4;
itmp1 += 4;
itmp2 += 4;
otmp += 4;
}
}
}
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 4 * outwh;
float* tmp0 = output + i * 4 * outwh;
float* tmp1 = output + i * 4 * outwh + 1 * outwh;
float* tmp2 = output + i * 4 * outwh + 2 * outwh;
float* tmp3 = output + i * 4 * outwh + 3 * outwh;
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 4;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* otmp = output_tmp + channel_count * 4 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 4;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
/* convdw3x3s2 (channel-packed SIMD path): depthwise 3x3 convolution, stride 2.
 *
 * output      [inc][outh*outw] result planes (NCHW)
 * img_data    [inc][inh*inw] input planes (NCHW), padding already applied
 * kernel_data 9 weights per channel
 * bias_data   one bias per channel, or NULL for zero bias
 *
 * Strategy: channels are repacked into groups of 4 ("C/4 HW4" interleave) so
 * each SSE lane processes one channel; results are unpacked back to NCHW at
 * the end.
 *
 * Fixes vs. previous revision:
 *   - the scalar (#else) fallback seeded every 4-lane accumulator with
 *     {btmp[0]}, which dropped the bias of lanes 1..3 (the SSE path correctly
 *     loads all four); all lanes now get their own bias;
 *   - the padded remainder group is zero-filled before use and skipped
 *     entirely when inc is a multiple of 4, so the kernel never computes on
 *     uninitialized memory.
 *
 * NOTE(review): num_thread is accepted but this packed path is currently
 * single-threaded (no omp pragma here).
 */
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
                        int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 2;                    // complete groups of 4 channels
    int channel_remain = inc - (channel_count << 2); // leftover channels (0..3)
    // number of 4-channel groups that actually carry data
    int block_count = channel_remain > 0 ? channel_count + 1 : channel_count;

    // pack image / kernel / bias into interleaved groups of 4 channels;
    // one spare group is always allocated to host the remainder channels
    float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
    {
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 4;
            float* k0 = img_data + (ii + 0) * inwh;
            float* k1 = img_data + (ii + 1) * inwh;
            float* k2 = img_data + (ii + 2) * inwh;
            float* k3 = img_data + (ii + 3) * inwh;
            float* f0 = kernel_data + (ii + 0) * 9;
            float* f1 = kernel_data + (ii + 1) * 9;
            float* f2 = kernel_data + (ii + 2) * 9;
            float* f3 = kernel_data + (ii + 3) * 9;
            float* tmp0 = img_tmp + ii * inwh;
            float* tmp1 = kernel_tmp + ii * 9;
            float* tmp2 = bias_tmp + ii;
            // interleave 4 image planes: tmp0[4*j + lane] = plane[lane][j]
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0];
                tmp0[1] = k1[0];
                tmp0[2] = k2[0];
                tmp0[3] = k3[0];
                tmp0 += 4;
                k0++;
                k1++;
                k2++;
                k3++;
            }
            // interleave the 4 kernels the same way (9 taps each)
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0];
                tmp1[1] = f1[0];
                tmp1[2] = f2[0];
                tmp1[3] = f3[0];
                tmp1 += 4;
                f0++;
                f1++;
                f2++;
                f3++;
            }
            if (bias_data)
            {
                tmp2[0] = bias_data[ii + 0];
                tmp2[1] = bias_data[ii + 1];
                tmp2[2] = bias_data[ii + 2];
                tmp2[3] = bias_data[ii + 3];
            }
            else
            {
                tmp2[0] = 0;
                tmp2[1] = 0;
                tmp2[2] = 0;
                tmp2[3] = 0;
            }
        }
        if (channel_remain > 0)
        {
            // zero-fill the spare group so lanes that carry no real channel
            // still compute on defined data
            float* z0 = img_tmp + channel_count * 4 * inwh;
            for (int j = 0; j < 4 * inwh; j++)
                z0[j] = 0.f;
            float* z1 = kernel_tmp + channel_count * 4 * 9;
            for (int j = 0; j < 4 * 9; j++)
                z1[j] = 0.f;
            float* z2 = bias_tmp + channel_count * 4;
            for (int j = 0; j < 4; j++)
                z2[j] = 0.f;
        }
        // scatter the remainder channels into lane i of the spare group
        for (int i = 0; i < channel_remain; i++)
        {
            int ii = channel_count * 4 + i;
            float* k0 = img_data + ii * inwh;
            float* f0 = kernel_data + ii * 9;
            float* tmp0 = img_tmp + channel_count * 4 * inwh;
            float* tmp1 = kernel_tmp + channel_count * 4 * 9;
            float* tmp2 = bias_tmp + channel_count * 4;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[i] = k0[0];
                tmp0 += 4;
                k0++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[i] = f0[0];
                tmp1 += 4;
                f0++;
            }
            if (bias_data)
            {
                tmp2[i] = bias_data[ii];
            }
            // else: lane already zeroed above
        }
    }
    float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
    // convolution proper: one packed 4-channel group at a time
    for (int c = 0; c < block_count; c++)
    {
        float* ktmp = kernel_tmp + c * 4 * 9;
        float* btmp = bias_tmp + c * 4;
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // stride 2: output row i reads input rows 2i, 2i+1, 2i+2
            float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw;
            float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw;
            float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw;
            float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
            // 4 output columns per iteration (consumes 9 packed input vectors)
            for (; j + 3 < outw; j += 4)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _sum1 = _mm_loadu_ps(btmp);
                __m128 _sum2 = _mm_loadu_ps(btmp);
                __m128 _sum3 = _mm_loadu_ps(btmp);
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va3 = _mm_loadu_ps(itmp0 + 12);
                __m128 _va4 = _mm_loadu_ps(itmp0 + 16);
                __m128 _va5 = _mm_loadu_ps(itmp0 + 20);
                __m128 _va6 = _mm_loadu_ps(itmp0 + 24);
                __m128 _va7 = _mm_loadu_ps(itmp0 + 28);
                __m128 _va8 = _mm_loadu_ps(itmp0 + 32);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                // top kernel row: output column n uses input columns 2n..2n+2
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
                // middle kernel row
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _va3 = _mm_loadu_ps(itmp1 + 12);
                _va4 = _mm_loadu_ps(itmp1 + 16);
                _va5 = _mm_loadu_ps(itmp1 + 20);
                _va6 = _mm_loadu_ps(itmp1 + 24);
                _va7 = _mm_loadu_ps(itmp1 + 28);
                _va8 = _mm_loadu_ps(itmp1 + 32);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
                // bottom kernel row
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _va3 = _mm_loadu_ps(itmp2 + 12);
                _va4 = _mm_loadu_ps(itmp2 + 16);
                _va5 = _mm_loadu_ps(itmp2 + 20);
                _va6 = _mm_loadu_ps(itmp2 + 24);
                _va7 = _mm_loadu_ps(itmp2 + 28);
                _va8 = _mm_loadu_ps(itmp2 + 32);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
                _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
                _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
                _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
                _mm_storeu_ps(otmp, _sum0);
                _mm_storeu_ps(otmp + 4, _sum1);
                _mm_storeu_ps(otmp + 8, _sum2);
                _mm_storeu_ps(otmp + 12, _sum3);
#else
                // FIX: seed every lane with its own bias (was {btmp[0]})
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k];
                    sum0[k] += itmp1[k] * ktmp[k + 12];
                    sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4];
                    sum0[k] += itmp1[k + 4] * ktmp[k + 16];
                    sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8];
                    sum0[k] += itmp1[k + 8] * ktmp[k + 20];
                    sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                    sum1[k] += itmp0[k + 8] * ktmp[k];
                    sum1[k] += itmp1[k + 8] * ktmp[k + 12];
                    sum1[k] += itmp2[k + 8] * ktmp[k + 24];
                    sum1[k] += itmp0[k + 12] * ktmp[k + 4];
                    sum1[k] += itmp1[k + 12] * ktmp[k + 16];
                    sum1[k] += itmp2[k + 12] * ktmp[k + 28];
                    sum1[k] += itmp0[k + 16] * ktmp[k + 8];
                    sum1[k] += itmp1[k + 16] * ktmp[k + 20];
                    sum1[k] += itmp2[k + 16] * ktmp[k + 32];
                    sum2[k] += itmp0[k + 16] * ktmp[k];
                    sum2[k] += itmp1[k + 16] * ktmp[k + 12];
                    sum2[k] += itmp2[k + 16] * ktmp[k + 24];
                    sum2[k] += itmp0[k + 20] * ktmp[k + 4];
                    sum2[k] += itmp1[k + 20] * ktmp[k + 16];
                    sum2[k] += itmp2[k + 20] * ktmp[k + 28];
                    sum2[k] += itmp0[k + 24] * ktmp[k + 8];
                    sum2[k] += itmp1[k + 24] * ktmp[k + 20];
                    sum2[k] += itmp2[k + 24] * ktmp[k + 32];
                    sum3[k] += itmp0[k + 24] * ktmp[k];
                    sum3[k] += itmp1[k + 24] * ktmp[k + 12];
                    sum3[k] += itmp2[k + 24] * ktmp[k + 24];
                    sum3[k] += itmp0[k + 28] * ktmp[k + 4];
                    sum3[k] += itmp1[k + 28] * ktmp[k + 16];
                    sum3[k] += itmp2[k + 28] * ktmp[k + 28];
                    sum3[k] += itmp0[k + 32] * ktmp[k + 8];
                    sum3[k] += itmp1[k + 32] * ktmp[k + 20];
                    sum3[k] += itmp2[k + 32] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k];
                    otmp[k + 4] = sum1[k];
                    otmp[k + 8] = sum2[k];
                    otmp[k + 12] = sum3[k];
                }
#endif
                // stride 2: 4 outputs consume 8 input columns (x4 lanes)
                itmp0 += 32;
                itmp1 += 32;
                itmp2 += 32;
                otmp += 16;
            }
            // leftover columns, one at a time
            for (; j < outw; j++)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
                _mm_storeu_ps(otmp, _sum0);
#else
                // FIX: seed every lane with its own bias (was {btmp[0]})
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k];
                    sum0[k] += itmp1[k] * ktmp[k + 12];
                    sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4];
                    sum0[k] += itmp1[k + 4] * ktmp[k + 16];
                    sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8];
                    sum0[k] += itmp1[k + 8] * ktmp[k + 20];
                    sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k];
                }
#endif
                itmp0 += 8;
                itmp1 += 8;
                itmp2 += 8;
                otmp += 4;
            }
        }
    }
    // unpack the interleaved results back to plain NCHW
    {
        for (int i = 0; i < channel_count; i++)
        {
            float* otmp = output_tmp + i * 4 * outwh;
            float* tmp0 = output + i * 4 * outwh;
            float* tmp1 = output + i * 4 * outwh + 1 * outwh;
            float* tmp2 = output + i * 4 * outwh + 2 * outwh;
            float* tmp3 = output + i * 4 * outwh + 3 * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[0];
                tmp1[0] = otmp[1];
                tmp2[0] = otmp[2];
                tmp3[0] = otmp[3];
                otmp += 4;
                tmp0++;
                tmp1++;
                tmp2++;
                tmp3++;
            }
        }
        for (int i = 0; i < channel_remain; i++)
        {
            int ii = channel_count * 4 + i;
            float* otmp = output_tmp + channel_count * 4 * outwh;
            float* tmp0 = output + ii * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[i];
                otmp += 4;
                tmp0++;
            }
        }
    }
    sys_free(output_tmp);
    sys_free(img_tmp);
    sys_free(kernel_tmp);
    sys_free(bias_tmp);
}
#else
/* Depthwise 3x3 convolution, stride 1 (scalar reference path).
 *
 * output   [channel][out_h*out_w] result planes (NCHW)
 * input    [channel][in_h*in_w] input planes, padding already applied
 * _kernel  9 weights per channel
 * _bias    one bias per channel, or NULL for zero bias
 *
 * Each channel is processed independently (one OpenMP task per channel).
 * The main loop emits two output rows per pass over four input rows; a
 * trailing loop handles an odd final row.
 */
static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
                        int out_h, int out_w, int num_thread)
{
    const int in_plane = in_w * in_h;   /* elements per input channel */
    const int out_plane = out_w * out_h; /* elements per output channel */

#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < channel; g++)
    {
        const float* kern = _kernel + g * 9;
        const float bias0 = _bias ? _bias[g] : 0.f;
        const float* r0 = input + g * in_plane;
        const float* r1 = r0 + in_w;
        const float* r2 = r0 + in_w * 2;
        const float* r3 = r0 + in_w * 3;
        float* dst0 = output + g * out_plane;
        float* dst1 = dst0 + out_w;

        int oy = 0;
        /* two output rows per iteration */
        for (; oy + 1 < out_h; oy += 2)
        {
            for (int ox = out_w; ox > 0; ox--)
            {
                float acc0 = bias0;
                acc0 += r0[0] * kern[0];
                acc0 += r0[1] * kern[1];
                acc0 += r0[2] * kern[2];
                acc0 += r1[0] * kern[3];
                acc0 += r1[1] * kern[4];
                acc0 += r1[2] * kern[5];
                acc0 += r2[0] * kern[6];
                acc0 += r2[1] * kern[7];
                acc0 += r2[2] * kern[8];
                float acc1 = bias0;
                acc1 += r1[0] * kern[0];
                acc1 += r1[1] * kern[1];
                acc1 += r1[2] * kern[2];
                acc1 += r2[0] * kern[3];
                acc1 += r2[1] * kern[4];
                acc1 += r2[2] * kern[5];
                acc1 += r3[0] * kern[6];
                acc1 += r3[1] * kern[7];
                acc1 += r3[2] * kern[8];
                *dst0++ = acc0;
                *dst1++ = acc1;
                r0++;
                r1++;
                r2++;
                r3++;
            }
            /* skip the 2-column margin, then one full row (we emitted 2 rows) */
            r0 += 2 + in_w;
            r1 += 2 + in_w;
            r2 += 2 + in_w;
            r3 += 2 + in_w;
            dst0 += out_w;
            dst1 += out_w;
        }
        /* leftover single output row */
        for (; oy < out_h; oy++)
        {
            for (int ox = out_w; ox > 0; ox--)
            {
                float acc = bias0;
                acc += r0[0] * kern[0];
                acc += r0[1] * kern[1];
                acc += r0[2] * kern[2];
                acc += r1[0] * kern[3];
                acc += r1[1] * kern[4];
                acc += r1[2] * kern[5];
                acc += r2[0] * kern[6];
                acc += r2[1] * kern[7];
                acc += r2[2] * kern[8];
                *dst0++ = acc;
                r0++;
                r1++;
                r2++;
            }
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
/* Depthwise 3x3 convolution, stride 2 (scalar reference path).
 *
 * output   [channel][out_h*out_w] result planes (NCHW)
 * input    [channel][in_h*in_w] input planes, padding already applied
 * _kernel  9 weights per channel
 * _bias    one bias per channel, or NULL for zero bias
 *
 * One OpenMP task per channel; each output element is a 3x3 window dot
 * product anchored at input position (2*oy, 2*ox).
 */
static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
                        int out_h, int out_w, int num_thread)
{
    const int in_plane = in_w * in_h;
    const int out_plane = out_w * out_h;
    /* after a row the read pointers sit at column 2*out_w; finish the row
       (in_w - 2*out_w) and skip one more row (+in_w) to reach row 2*(oy+1) */
    const int row_skip = in_w - 2 * out_w + in_w;

#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < channel; g++)
    {
        const float* kern = _kernel + g * 9;
        const float bias0 = _bias ? _bias[g] : 0.f;
        const float* r0 = input + g * in_plane;
        const float* r1 = r0 + in_w;
        const float* r2 = r0 + in_w * 2;
        float* dst = output + g * out_plane;

        for (int oy = 0; oy < out_h; oy++)
        {
            for (int ox = out_w; ox > 0; ox--)
            {
                float acc = bias0;
                acc += r0[0] * kern[0];
                acc += r0[1] * kern[1];
                acc += r0[2] * kern[2];
                acc += r1[0] * kern[3];
                acc += r1[1] * kern[4];
                acc += r1[2] * kern[5];
                acc += r2[0] * kern[6];
                acc += r2[1] * kern[7];
                acc += r2[2] * kern[8];
                *dst++ = acc;
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            r0 += row_skip;
            r1 += row_skip;
            r2 += row_skip;
        }
    }
}
#endif
/* conv_dw_run: dispatcher for the 3x3 depthwise convolution kernels.
 *
 * Pads the input if needed, runs the stride-1 or stride-2 kernel per batch,
 * then applies ReLU/ReLU-x over the whole output.
 *
 * FIX vs. previous revision: the batch loop never advanced the input/output
 * pointers, so for batch_number > 1 every iteration re-read batch 0 and
 * overwrote the same output plane (and only batch 0 was ever padded).
 * Padding is now performed per batch into a reused scratch buffer.
 *
 * Returns 0 on success.
 */
int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
                struct ir_tensor* output_tensor, struct conv_param* param, int num_thread, int cpu_affinity)
{
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* kernel = ( float* )weight_tensor->data;
    float* biases = NULL;
    if (bias_tensor)
        biases = ( float* )bias_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;

    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;
    int stride_h = param->stride_h;
    int group = param->group;
    int activation = param->activation;

    /* padded input geometry (symmetric padding on each side) */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int need_pad = !(inh_tmp == inh && inw_tmp == inw);

    /* scratch buffer for one padded batch, reused across batches */
    float* pad_buf = NULL;
    if (need_pad)
        pad_buf = ( float* )sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));

    for (int n = 0; n < batch_number; n++)
    {
        float* cur_input = input + n * in_chw;
        float* cur_output = output + n * out_chw;
        float* input_tmp = cur_input;
        if (need_pad)
        {
            for (int g = 0; g < group; g++)
            {
                float* pad_in = cur_input + g * inh * inw;
                float* pad_out = pad_buf + g * inh_tmp * inw_tmp;
                pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
            }
            input_tmp = pad_buf;
        }
        if (stride_h == 1)
            convdw3x3s1(cur_output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(cur_output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }

    /* activation >= 0 selects ReLU (0) or clipped ReLU (x) */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);

    if (pad_buf)
        sys_free(pad_buf);
    return 0;
}
|
ocp_nlp_sqp_rti.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"
// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/sim/sim_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
/* Number of bytes needed to hold an ocp_nlp_sqp_rti_opts structure plus the
 * options of all submodules (QP solver, regularization, and the per-stage
 * dynamics / cost / constraints modules).
 * The layout implied here must match ocp_nlp_sqp_rti_opts_assign. */
int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    int bytes = sizeof(ocp_nlp_sqp_rti_opts);

    /* QP solver and regularization options */
    bytes += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
    bytes += config->regularize->opts_calculate_size();

    /* dynamics: pointer table + options for stages 0..N-1 */
    bytes += N * sizeof(void *);
    for (int i = 0; i < N; i++)
        bytes += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]);

    /* cost: pointer table + options for stages 0..N */
    bytes += (N + 1) * sizeof(void *);
    for (int i = 0; i <= N; i++)
        bytes += cost[i]->opts_calculate_size(cost[i], dims->cost[i]);

    /* constraints: pointer table + options for stages 0..N */
    bytes += (N + 1) * sizeof(void *);
    for (int i = 0; i <= N; i++)
        bytes += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]);

    return bytes;
}
/* Maps an ocp_nlp_sqp_rti_opts structure onto caller-provided raw memory and
 * assigns the submodule options inside it.
 * The order and sizes of the carved-out regions must mirror
 * ocp_nlp_sqp_rti_opts_calculate_size exactly; the trailing assert verifies
 * we stayed within the computed block.
 * Returns a pointer to the initialized opts struct (same address as
 * raw_memory). */
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int N = dims->N;
    char *c_ptr = (char *) raw_memory;
    /* the opts struct itself sits at the start of the block */
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_opts);
    /* QP solver options */
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
    /* regularization options */
    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();
    // dynamics: pointer table, then per-stage options (stages 0..N-1)
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }
    // cost: pointer table, then per-stage options (stages 0..N)
    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }
    // constraints: pointer table, then per-stage options (stages 0..N)
    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] =
            constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }
    /* sanity check: we must not have overrun the computed size */
    assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr);
    return opts;
}
/* Fills an assigned ocp_nlp_sqp_rti_opts structure with default values and
 * delegates to the defaults of every submodule (QP solver, regularization,
 * per-stage dynamics / cost / constraints). */
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int N = dims->N;

    /* top-level RTI options */
    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    opts->num_threads = ACADOS_NUM_THREADS;
#endif
    opts->ext_qp_res = 0;
    opts->step_length = 1.0;

    /* adjoints are not needed by the RTI scheme in dynamics/constraints */
    int compute_adj = 0;

    /* QP solver defaults */
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    /* regularization defaults */
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    /* per-stage dynamics defaults (stages 0..N-1) */
    for (int i = 0; i < N; i++)
    {
        dynamics[i]->opts_initialize_default(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
        dynamics[i]->opts_set(dynamics[i], opts->dynamics[i], "compute_adj", &compute_adj);
    }

    /* per-stage cost defaults (stages 0..N) */
    for (int i = 0; i <= N; i++)
    {
        cost[i]->opts_initialize_default(cost[i], dims->cost[i], opts->cost[i]);
    }

    /* per-stage constraints defaults (stages 0..N) */
    for (int i = 0; i <= N; i++)
    {
        constraints[i]->opts_initialize_default(constraints[i], dims->constraints[i],
                                                opts->constraints[i]);
        constraints[i]->opts_set(constraints[i], opts->constraints[i], "compute_adj", &compute_adj);
    }
}
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    /* Propagate (possibly modified) option values down into every submodule:
     * QP solver first, then dynamics, cost and constraints stage by stage. */
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    // QP solver
    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics: stages 0 .. N-1
    for (int i = 0; i < N; i++)
    {
        dynamics[i]->opts_update(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
    }

    // cost: stages 0 .. N
    for (int i = 0; i <= N; i++)
    {
        cost[i]->opts_update(cost[i], dims->cost[i], opts->cost[i]);
    }

    // constraints: stages 0 .. N
    for (int i = 0; i <= N; i++)
    {
        constraints[i]->opts_update(constraints[i], dims->constraints[i], opts->constraints[i]);
    }

    return;
}
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    // Set a single solver option by name. Fields with a "qp_" prefix are
    // forwarded to the QP solver module (prefix stripped); all other fields
    // are handled at the SQP-RTI level. An unknown field terminates the
    // process, consistent with the other acados setters.
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name: the prefix of `field` up to the first '_'
    char *char_ = strchr(field, '_');
    if (char_ != NULL)
    {
        module_length = char_ - field;
        // BUG FIX: only copy the prefix when it (plus the '\0' terminator)
        // fits into the fixed-size buffer; previously a field with a long
        // prefix overflowed `module` on the stack. A prefix that long cannot
        // be a known module name anyway, so it is treated as "no module".
        if (module_length < MAX_STR_LEN)
        {
            for (ii = 0; ii < module_length; ii++)
                module[ii] = field[ii];
            module[module_length] = '\0'; // add end of string
            ptr_module = module;
        }
    }

    // pass options to QP module
    if (ptr_module != NULL && (!strcmp(ptr_module, "qp")))
    {
        config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
                                    field + module_length + 1, value);

        // also record the warm-start value at the SQP-RTI level
        // (NOTE: the solver currently resets warm_start to 0 before each QP solve)
        if (!strcmp(field, "qp_warm_start"))
        {
            int *i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "num_threads"))
        {
            int *num_threads = (int *) value;
            opts->num_threads = *num_threads;
        }
        else if (!strcmp(field, "exact_hess"))
        {
            int N = config->N;
            // cost
            for (ii = 0; ii <= N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
            // dynamics
            for (ii = 0; ii < N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints: TODO disabled for now as it prevents convergence !!!
            // for (ii = 0; ii <= N; ii++)
            //     config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int *ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "step_length"))
        {
            double *step_length = (double *) value;
            opts->step_length = *step_length;
        }
        else
        {
            printf("\nerror: ocp_nlp_sqp_rti_opts_set: wrong field: %s\n", field);
            exit(1);
        }
    }

    return;
}
void ocp_nlp_sqp_rti_dynamics_opts_set(void *config_, void *opts_, int stage,
    const char *field, void *value)
{
    /* Forward a single option to the dynamics module of the given stage. */
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_dynamics_config *dyn = config->dynamics[stage];
    dyn->opts_set(dyn, opts->dynamics[stage], field, value);

    return;
}
void ocp_nlp_sqp_rti_cost_opts_set(void *config_, void *opts_, int stage,
    const char *field, void *value)
{
    /* Forward a single option to the cost module of the given stage. */
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_cost_config *cst = config->cost[stage];
    cst->opts_set(cst, opts->cost[stage], field, value);

    return;
}
void ocp_nlp_sqp_rti_constraints_opts_set(void *config_, void *opts_, int stage,
    const char *field, void *value)
{
    /* Forward a single option to the constraints module of the given stage.
     * The cast drops const because the module's setter takes a non-const
     * string (as in the original). */
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_constraints_config *con = config->constraints[stage];
    con->opts_set(con, opts->constraints[stage], (char *) field, value);

    return;
}
/************************************************
* memory
************************************************/
int ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    // Return the number of bytes that ocp_nlp_sqp_rti_memory_assign will carve
    // out of raw memory. Every term added here must have a counterpart in the
    // assign routine (the order of the summands is irrelevant; the total must
    // match).
    //
    // FIX: the loop index `ii` was previously declared at function scope and
    // then shadowed by `for (int ii...)` in several loops (-Wshadow); a single
    // loop index is now used throughout.
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_rti_memory);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics: pointer array + per-stage memories (stages 0 .. N-1)
    size += N * sizeof(void *);
    for (ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                    opts->dynamics[ii]);
    }

    // cost: pointer array + per-stage memories (stages 0 .. N)
    size += (N + 1) * sizeof(void *);
    for (ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: pointer array + per-stage memories (stages 0 .. N)
    size += (N + 1) * sizeof(void *);
    for (ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                       opts->constraints[ii]);
    }

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims);

    // stat: statistics table (status, iter [, 4 QP residual norms])
    int stat_m = 1 + 1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n * stat_m * sizeof(double);

    // dzduxt: one (nu+nx) x nz matrix per stage
    size += (N + 1) * sizeof(struct blasfeo_dmat);
    for (ii = 0; ii <= N; ii++)
        size += blasfeo_memsize_dmat(nu[ii] + nx[ii], nz[ii]);

    // z_alg: one nz vector per stage
    size += (N + 1) * sizeof(struct blasfeo_dvec);
    for (ii = 0; ii <= N; ii++)
        size += blasfeo_memsize_dvec(nz[ii]);

    size += 1 * 8;   // blasfeo_str align
    size += 1 * 64;  // blasfeo_mem align
    size += 8;       // initial align
    // make_int_multiple_of(64, &size);

    return size;
}
void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    // Carve the SQP-RTI solver memory out of the caller-provided raw_memory
    // buffer. Each size added to c_ptr here must have a matching term in
    // ocp_nlp_sqp_rti_memory_calculate_size (the final assert checks the
    // total was not exceeded).
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;
    // ocp_nlp_cost_dims **cost_dims = dims->cost;
    // int ny;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    // the memory struct itself sits at the start of the buffer
    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_memory);

    // qp in
    mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // QP solver
    mem->qp_solver_mem =
        qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // nlp mem (shared NLP-level memory: gradients, residuals, ...)
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims);

    // dynamics: pointer array followed by per-stage memories (stages 0 .. N-1)
    mem->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii],
                                                        opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                     opts->dynamics[ii]);
    }

    // cost: pointer array followed by per-stage memories (stages 0 .. N)
    mem->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: pointer array followed by per-stage memories (stages 0 .. N)
    mem->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(
            constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                        opts->constraints[ii]);
    }

    // stat: statistics table with stat_m rows and stat_n columns
    // (status, iter [, 4 QP residual norms when ext_qp_res is enabled])
    mem->stat = (double *) c_ptr;
    mem->stat_m = 1 + 1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m * mem->stat_n * sizeof(double);

    // blasfeo_str align
    align_char_to(8, &c_ptr);

    // dzduxt: array of matrix headers, one per stage
    mem->dzduxt = (struct blasfeo_dmat *) c_ptr;
    c_ptr += (N + 1) * sizeof(struct blasfeo_dmat);

    // z_alg: array of vector headers, one per stage
    mem->z_alg = (struct blasfeo_dvec *) c_ptr;
    c_ptr += (N + 1) * sizeof(struct blasfeo_dvec);

    // blasfeo_mem align (blasfeo data must be 64-byte aligned)
    align_char_to(64, &c_ptr);

    // dzduxt data: (nu+nx) x nz per stage
    for (ii = 0; ii <= N; ii++)
    {
        blasfeo_create_dmat(nu[ii] + nx[ii], nz[ii], mem->dzduxt + ii, c_ptr);
        c_ptr += blasfeo_memsize_dmat(nu[ii] + nx[ii], nz[ii]);
    }

    // z_alg data: nz per stage
    for (ii = 0; ii <= N; ii++)
    {
        blasfeo_create_dvec(nz[ii], mem->z_alg + ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }

    mem->status = ACADOS_READY;

    // the carved region must not exceed what calculate_size reported
    assert((char *) raw_memory + ocp_nlp_sqp_rti_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}
/************************************************
* workspace
************************************************/
int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    // Return the number of bytes needed by ocp_nlp_sqp_rti_cast_workspace.
    // The terms here must stay in sync with the pointer arithmetic there.
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;
    // NOTE(review): size_tmp/tmp are only used in the sequential
    // (non-OpenMP) reuse_workspace branch below
    int size_tmp = 0;
    int tmp;

    // sqp workspace struct itself
    size += sizeof(ocp_nlp_sqp_rti_work);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // arrays of per-stage workspace pointers
    // cost
    size += (N + 1) * sizeof(void *);
    // dynamics
    size += N * sizeof(void *);
    // constraints
    size += (N + 1) * sizeof(void *);

    // external QP residual computation (debugging feature)
    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP the submodules may run concurrently, so their
        // workspaces cannot overlap: sum all individual sizes
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                    opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                           opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                              dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // sequential execution: all submodules can share one region sized
        // to the largest individual workspace
        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                         opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii],
                                                            opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        size += size_tmp;
#endif
    }
    else
    {
        // no reuse: every submodule gets its own region; sum all sizes
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                    opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                           opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                              dims->constraints[ii], opts->constraints[ii]);
        }
    }

    return size;
}
// TODO(all): introduce member "memsize" in all structures to make the on-line
// cast cheaper (i.e. avoid re-calculating sizes on-line)
static void ocp_nlp_sqp_rti_cast_workspace(void *config_, ocp_nlp_dims *dims,
                                           ocp_nlp_sqp_rti_work *work,
                                           ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_opts *opts)
{
    // Set up all pointers inside the workspace struct `work`, carving the
    // sub-workspaces out of the single raw buffer that starts at `work`
    // itself. The pointer arithmetic must stay consistent with
    // ocp_nlp_sqp_rti_workspace_calculate_size (checked by the final assert).
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract dims
    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // sqp workspace struct itself sits at the start of the buffer
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_work);

    // qp in (scratch QP, e.g. for parametric sensitivities)
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // arrays of per-stage workspace pointers
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // external QP residual computation (debugging feature)
    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP the submodules may run concurrently: give each its own
        // disjoint workspace region
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                     opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                            opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                               dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // sequential execution: all submodule workspace pointers alias the
        // SAME region starting at c_ptr, sized to the largest workspace
        int size_tmp = 0;
        int tmp;

        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        c_ptr += size_tmp;
#endif
    }
    else
    {
        // no reuse: every submodule gets its own region
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
                                                     opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                            opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                                                               dims->constraints[ii], opts->constraints[ii]);
        }
    }

    // assert & return: the carved region must not exceed the reported size
    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}
/************************************************
* functions
************************************************/
static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                          ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts,
                          ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work)
{
    /* Run the per-solve initialization of every stage-wise module (cost,
     * dynamics, constraints). Stages are independent, hence the parallel
     * loop when OpenMP is enabled. */
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    int N = dims->N;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int i = 0; i <= N; i++)
    {
        // cost
        config->cost[i]->initialize(config->cost[i], dims->cost[i], nlp_in->cost[i],
                                    opts->cost[i], mem->cost[i], work->cost[i]);

        // dynamics (only stages 0 .. N-1)
        if (i < N)
        {
            config->dynamics[i]->initialize(config->dynamics[i], dims->dynamics[i],
                                            nlp_in->dynamics[i], opts->dynamics[i],
                                            mem->dynamics[i], work->dynamics[i]);
        }

        // constraints
        config->constraints[i]->initialize(config->constraints[i], dims->constraints[i],
                                           nlp_in->constraints[i], opts->constraints[i],
                                           mem->constraints[i], work->constraints[i]);
    }

    return;
}
static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                                         ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts,
                                         ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work)
{
    // Evaluate and linearize all NLP modules at the current iterate, letting
    // them write their QP matrix contributions (Hessian, Jacobians), then
    // gather the per-stage function/gradient evaluations into the shared NLP
    // memory for the subsequent QP rhs update.
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    /* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0; the modules below accumulate into it
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);

        // dynamics (only stages 0 .. N-1)
        if (i < N)
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
                                                    nlp_in->dynamics[i], opts->dynamics[i],
                                                    mem->dynamics[i], work->dynamics[i]);

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i],
                                            opts->cost[i], mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
                                                   nlp_in->constraints[i], opts->constraints[i],
                                                   mem->constraints[i], work->constraints[i]);
    }

    /* collect stage-wise evaluations into the central NLP memory */
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad (length nv[i])
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0);

        // nlp mem: dyn_fun — dynamics residual, length nx[i+1] (next state)
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun
                = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj — dynamics adjoint w.r.t. this stage's (u,x);
        // no dynamics at the terminal stage, so zero it there
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0);
        }
        else
        {
            blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0);
        }

        // add the previous stage's adjoint contribution w.r.t. this stage's
        // state: the source offset nu[i-1]+nx[i-1] selects the x_{i} part of
        // stage i-1's adjoint, added into the x part (offset nu[i]) here
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i],
                          nlp_mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun — inequality residuals, 2*ni[i] (lower and upper)
        struct blasfeo_dvec *ineq_fun =
            config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0);

        // nlp mem: ineq_adj — inequality adjoint, length nv[i]
        struct blasfeo_dvec *ineq_adj =
            config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0);
    }

    // TODO(all): still to clean !!!!!!!!!!!!!
    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        //        if(i<N)
        //        {
        //            ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme != NULL && opts->scheme->type != exact)
        //            {
        //                for (int_t j = 0; j < nx; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //                for (int_t j = 0; j < nu; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //            }
        //        }
    }

    return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
                                  ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts,
                                  ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work)
{
    /* Copy the freshly linearized NLP quantities into the QP right-hand side:
     * cost gradient -> g, dynamics residual -> b, inequality residual -> d. */
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int i = 0; i <= N; i++)
    {
        // g: cost gradient (length nv[i])
        blasfeo_dveccp(nv[i], nlp_mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);

        // b: dynamics residual (no dynamics at the terminal stage)
        if (i < N)
            blasfeo_dveccp(nx[i + 1], nlp_mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);

        // d: inequality residuals, lower and upper parts
        blasfeo_dveccp(2 * ni[i], nlp_mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
    }

    return;
}
static void sqp_update_variables(ocp_nlp_dims *dims, ocp_nlp_out *nlp_out,
                                 ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem,
                                 ocp_nlp_sqp_rti_work *work)
{
    // Apply the QP solution to the NLP iterate: a step of size alpha
    // (opts->step_length) in the primal variables, and a convex combination
    // (1-alpha)*old + alpha*qp in the dual variables (the QP returns absolute
    // multipliers — see "abs dual var" note on sqp_update_qp_vectors).
    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    // TODO(all): fix and move where appropriate
    //    for (i = 0; i < N; i++)
    //    {
    //        nx1 = dims->constraints[i+1]->nx;
    //        for (j = 0; j < nx1; j++)
    //        {
    //            work->sim_in[i]->S_adj[j] = -BLASFEO_DVECEL(&mem->qp_out->pi[i], j);
    //        }
    //    }

    double alpha = opts->step_length;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // (full) step in primal variables: ux += alpha * qp_step
        blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, nlp_out->ux + i, 0, nlp_out->ux + i, 0);

        // absolute in dual variables: pi = (1-alpha)*pi + alpha*qp_pi
        // (equality multipliers exist only for stages 0 .. N-1)
        if (i < N)
        {
            blasfeo_dvecsc(nx[i+1], 1.0-alpha, nlp_out->pi+i, 0);
            blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, nlp_out->pi+i, 0, nlp_out->pi+i, 0);
        }

        // inequality multipliers and slacks (2*ni: lower and upper parts)
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, nlp_out->lam+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, nlp_out->lam+i, 0, nlp_out->lam+i, 0);
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, nlp_out->t+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, nlp_out->t+i, 0, nlp_out->t+i, 0);

        // algebraic variables from the dynamics modules' z_alg; skipped at
        // stage N (no dynamics there — NOTE(review): presumably intentional,
        // confirm nz[N] is unused downstream)
        if (i < N)
        {
            blasfeo_dvecsc(nz[i], 1.0-alpha, nlp_out->z+i, 0);
            blasfeo_daxpy(nz[i], alpha, mem->z_alg+i, 0, nlp_out->z+i, 0, nlp_out->z+i, 0);
        }
    }

    return;
}
// Simple fixed-step Gauss-Newton based SQP routine: performs exactly ONE
// real-time iteration — alias setup, linearization, Hessian regularization,
// one QP solve, and one primal-dual step update.
// Returns ACADOS_SUCCESS, or ACADOS_QP_FAILURE if the QP solver fails.
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                    void *opts_, void *mem_, void *work_)
{
    // acados timers: timer0 measures the whole call, timer1 individual phases
    acados_timer timer0, timer1;
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_rti_work *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, work, mem, opts);

    // zero timers
    double total_time = 0.0;
    mem->time_qp_sol = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    // extract dims
    int N = dims->N;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: point the dynamics modules at the shared
    // iterate, QP matrices and algebraic-variable storage
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (int ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(mem->qp_in->BAbt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(mem->dzduxt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(mem->nlp_mem->sim_guess+ii,
                                                       mem->nlp_mem->set_sim_guess+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (int ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(mem->qp_in->Z + ii, mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (int ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(mem->qp_in->DCt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(mem->qp_in->idxb[ii], mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(mem->qp_in->idxs[ii], mem->constraints[ii]);
    }

    // alias to regularize memory (executed redundantly by each OpenMP thread;
    // all threads write the same pointer values)
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, mem->qp_in->RSQrq, mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, mem->qp_in->rqz, mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, mem->qp_in->BAbt, mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, mem->qp_in->b, mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, mem->qp_in->idxb, mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, mem->qp_in->DCt, mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, mem->qp_out->ux, mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, mem->qp_out->pi, mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, mem->qp_out->lam, mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (int ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP (per-stage module initialization)
    initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);

    /* SQP body: one real-time iteration */

    // linearize NLP and update QP matrices
    acados_tic(&timer1);
    linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);
    mem->time_lin += acados_toc(&timer1);

    // update QP rhs for SQP (step prim var, abs dual var)
    sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);

    // regularize Hessian
    acados_tic(&timer1);
    config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);

    // TODO no warm start across NLP solutions (yet)
    int tmp_int = 0;
    config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &tmp_int);

    // solve the QP
    // TODO move qp_out in memory !!!!! (it has to be preserved to do warm start)
    acados_tic(&timer1);
    qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, mem->qp_in, mem->qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
    mem->time_qp_sol += acados_toc(&timer1);

    // compute correct dual solution in case of Hessian regularization
    acados_tic(&timer1);
    config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);

    // TODO move into QP solver memory ???
    qp_info *qp_info_;
    ocp_qp_out_get(mem->qp_out, "qp_info", &qp_info_);
    nlp_out->qp_iter = qp_info_->num_iter;
    qp_iter = qp_info_->num_iter;

    // compute external QP residuals (for debugging)
    if (opts->ext_qp_res)
    {
        ocp_qp_res_compute(mem->qp_in, mem->qp_out, work->qp_res, work->qp_res_ws);
        ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
    }

    // save statistics
    mem->stat[mem->stat_n*1+0] = qp_status;
    mem->stat[mem->stat_n*1+1] = qp_iter;

    // BUG FIX: use logical && instead of bitwise & — the intent is a boolean
    // conjunction of the two comparisons (CERT EXP46-C)
    if ((qp_status != ACADOS_SUCCESS) && (qp_status != ACADOS_MAXITER))
    {
        // QP solver failed: record timing, report and bail out
        total_time += acados_toc(&timer0);
        mem->time_tot = total_time;
        nlp_out->total_time = total_time;
        printf("QP solver returned error status %d\n", qp_status);
#if defined(ACADOS_WITH_OPENMP)
        // restore number of threads
        omp_set_num_threads(num_threads_bkp);
#endif
        mem->status = ACADOS_QP_FAILURE;
        return mem->status;
    }

    // take the (fixed-step) primal-dual update
    sqp_update_variables(dims, nlp_out, opts, mem, work);

    // stop timer
    total_time += acados_toc(&timer0);
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_SUCCESS;
    return mem->status;
}
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    /* One-time precomputations before the first solve: set the sampling time
     * of each dynamics stage and run the dynamics modules' precompute hooks.
     * Returns ACADOS_SUCCESS or the first failing module's status. */
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;

    ocp_nlp_sqp_rti_work *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, work, mem, opts);

    int N = dims->N;
    int status = ACADOS_SUCCESS;

    // TODO(lint): add consistency checks, with a flag to enable/disable them
    // (e.g. that ns in the optimization variables matches ns in constraints)

    for (int i = 0; i < N; i++)
    {
        // set sampling time
        config->dynamics[i]->model_set(config->dynamics[i], dims->dynamics[i],
                                       nlp_in->dynamics[i], "T", nlp_in->Ts + i);

        // dynamics precompute; abort on the first failure
        status = config->dynamics[i]->precompute(config->dynamics[i], dims->dynamics[i],
                                                 nlp_in->dynamics[i], opts->dynamics[i],
                                                 mem->dynamics[i], work->dynamics[i]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}
// Evaluate parametric sensitivities of the QP solution with respect to one
// initial-state entry.  Supported only for field "ex" at stage 0: a copy of the
// last QP is given a zero right-hand side except for a unit perturbation on the
// selected lbx/ubx entry, the QP solver's sensitivity evaluation is invoked,
// and the resulting directional derivative is copied into sens_nlp_out.
// Exits the process with an error message for any unsupported field/stage.
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_work *work = work_;

    ocp_nlp_sqp_rti_cast_workspace(config, dims, work, mem, opts);

    // build a copy of the last QP with an all-zero right-hand side
    d_ocp_qp_copy_all(mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // BUGFIX: use logical && (short-circuit) instead of bitwise & — the old
    // form relied on both operands being exactly 0/1 and always evaluated both.
    if ((!strcmp("ex", field)) && (stage == 0))
    {
        // unit perturbation on the selected initial-state bound (lbx == ubx at stage 0)
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);

        // copy tmp_qp_out into sens_nlp_out
        int i;

        // extract dims
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            // primal variables (states and controls)
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
            // dynamics multipliers exist only for the first N stages
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
            // inequality multipliers and slacks (lower and upper: 2*ni entries)
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}
// TODO: rename to ocp_nlp_sqp_rti_memory_get?
// Getter for solver statistics and status stored in the SQP-RTI memory.
// Writes the requested value through return_value_, whose pointee type depends
// on the field ("sqp_iter"/"status"/"stat_m"/"stat_n": int, the time fields:
// double, "stat": double*).  Unknown fields print an error and exit.
void ocp_nlp_sqp_rti_get(void *config_, void *mem_, const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    // RTI performs exactly one QP solve per call, so the iteration count is fixed
    if (!strcmp("sqp_iter", field))
    {
        *(int *) return_value_ = 1;
        return;
    }
    if (!strcmp("status", field))
    {
        *(int *) return_value_ = mem->status;
        return;
    }
    if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        *(double *) return_value_ = mem->time_tot;
        return;
    }
    if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        *(double *) return_value_ = mem->time_qp_sol;
        return;
    }
    if (!strcmp("time_lin", field))
    {
        *(double *) return_value_ = mem->time_lin;
        return;
    }
    if (!strcmp("time_reg", field))
    {
        *(double *) return_value_ = mem->time_reg;
        return;
    }
    if (!strcmp("stat", field))
    {
        *(double **) return_value_ = mem->stat;
        return;
    }
    if (!strcmp("stat_m", field))
    {
        *(int *) return_value_ = mem->stat_m;
        return;
    }
    if (!strcmp("stat_n", field))
    {
        *(int *) return_value_ = mem->stat_n;
        return;
    }

    printf("\nerror: output type %s not available in ocp_nlp_sqp_rti module\n", field);
    exit(1);
}
// Populate the generic ocp_nlp_config vtable with the SQP-RTI implementations.
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // options handling
    config->opts_calculate_size = ocp_nlp_sqp_rti_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_rti_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_rti_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_rti_opts_update;
    config->opts_set = ocp_nlp_sqp_rti_opts_set;
    config->dynamics_opts_set = ocp_nlp_sqp_rti_dynamics_opts_set;
    config->cost_opts_set = ocp_nlp_sqp_rti_cost_opts_set;
    config->constraints_opts_set = ocp_nlp_sqp_rti_constraints_opts_set;

    // memory and workspace sizing
    config->memory_calculate_size = ocp_nlp_sqp_rti_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_rti_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_rti_workspace_calculate_size;

    // solver entry points
    config->evaluate = ocp_nlp_sqp_rti;
    config->eval_param_sens = ocp_nlp_sqp_rti_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_rti_config_initialize_default;
    config->precompute = ocp_nlp_sqp_rti_precompute;
    config->get = ocp_nlp_sqp_rti_get;
}
|
cast_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: renzun@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include "compiler_fp16.h"
#include "cast_param.h"
// No per-node initialization needed for the cast operator; always succeeds.
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
// Nothing allocated in init_node, so there is nothing to release.
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
// No pre-run preparation (buffer setup, packing, etc.) required for cast.
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
// Execute the cast operator: element-wise conversion between fp32 (type 1)
// and fp16 (type 2), parallelized over batch*channel slices with OpenMP.
// NOTE(review): indexes dims[0..3], so it assumes a 4-D NCHW tensor — confirm
// upstream shape validation.  Unsupported (type_from, type_to) pairs fall
// through and return 0 without writing the output — presumably handled (or
// rejected) elsewhere; verify against the graph-level cast handling.
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct cast_param* cast_param = ( struct cast_param* )ir_node->op.param_mem;
    int type_from = cast_param->type_from;
    int type_to = cast_param->type_to;

    // NCHW slicing: one OpenMP work item per (batch, channel) plane
    int channel_num = input_tensor->dims[1];
    int batch_number = input_tensor->dims[0];
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    int num_thread = exec_graph->num_thread;

    // fp32 -> fp16
    if (type_from == 1 && type_to == 2)
    {
        float* idata = ( float* )input_tensor->data;
        __fp16* odata = ( __fp16* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp32_to_fp16(idata[j + offset]);
            }
        }
    }
    // fp16 -> fp32
    if (type_from == 2 && type_to == 1)
    {
        __fp16* idata = ( __fp16* )input_tensor->data;
        float* odata = ( float* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp16_to_fp32(idata[j + offset]);
            }
        }
    }

    return 0;
}
// Scheduling score for this implementation: best score when the input is
// NCHW, otherwise 0 (implementation not applicable).
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_tensor* in_tensor =
        get_ir_graph_tensor(exec_node->graph, exec_node->input_tensors[0]);

    return (in_tensor->layout == TENGINE_LAYOUT_NCHW) ? OPS_SCORE_BEST : 0;
}
// Operator vtable for the cast op; reshape/postrun are not needed.
static struct node_ops hcl_node_ops = {.prerun = prerun,
    .run = run,
    .reshape = NULL,
    .postrun = NULL,
    .init_node = init_node,
    .release_node = release_node,
    .score = score};

// Register this implementation for OP_CAST with the runtime's builtin table.
static int reg_cast_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CAST, &hcl_node_ops);
}

// Remove the registration on module unload.
static int unreg_cast_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CAST, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_cast_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_cast_hcl_ops);
|
spmm_hicoo_mat.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>
// Benchmark driver for HiCOO sparse-matrix × dense-matrix multiply (SpMM):
// parses options, loads a sparse matrix, converts it to HiCOO, runs a warm-up
// plus `niters` timed iterations (sequential or OpenMP variants), and reports
// average time and GFLOPS.
int main(int argc, char * const argv[]) {
    char * mm_filename = NULL;
    FILE *fi = NULL, *fo = NULL;
    ptiSparseMatrix spA;
    ptiSparseMatrixHiCOO hispA;
    ptiMatrix B, C;
    ptiElementIndex sb_bits = 7; // 2^7 by default
    ptiIndex R = 16;
    int niters = 50;
    ptiTimer timer;
    ptiNewTimer(&timer, 0);

    /* OpenMP */
    int cuda_dev_id = -2;       // -2: sequential, -1: OpenMP (despite the name)
    int nthreads = 1; // get from OMP_NUM_THREADS environment
    int use_schedule = 0; // privatization or not
    ptiElementIndex sk_bits = sb_bits;
    int par_iters = 0; // determine in the code
    ptiMatrix * Cbufs;

    static struct option long_options[] = {
        {"input", required_argument, 0, 'i'},
        {"output", optional_argument, 0, 'o'},
        {"bs", optional_argument, 0, 'b'},
        {"ks", optional_argument, 0, 'k'},
        {"R", optional_argument, 0, 'r'},
        {"cuda-dev-id", optional_argument, 0, 'd'},
        {"use-schedule", optional_argument, 0, 'u'},
        {0, 0, 0, 0}
    };

    for(;;) {
        int option_index = 0;
        int c = 1;
        c = getopt_long(argc, argv, "i:o:b:k:r:d:u:", long_options, &option_index);
        if(c == -1) {
            break;
        }
        switch(c) {
        case 'i':
            mm_filename = optarg;
            fi = fopen(optarg, "r");
            ptiAssert(fi != NULL);
            break;
        case 'o':
            fo = fopen(optarg, "w");
            ptiAssert(fo != NULL);
            break;
        case 'b':
            sscanf(optarg, "%"HIPARTI_SCN_ELEMENT_INDEX, &sb_bits);
            break;
        case 'k':
            sscanf(optarg, "%"HIPARTI_SCN_ELEMENT_INDEX, &sk_bits);
            break;
        case 'r':
            sscanf(optarg, "%"HIPARTI_SCN_INDEX, &R);
            break;
        case 'd':
            sscanf(optarg, "%d", &cuda_dev_id);
            break;
        case 'u':
            sscanf(optarg, "%d", &use_schedule);
            break;
        default:
            abort();
        }
    }

    // NOTE(review): R is ptiIndex but is printed with %d — confirm the
    // underlying type's width matches int on target platforms.
    printf("B ncols: %d\n", R);
    printf("niters: %d\n", niters);
    printf("cuda_dev_id: %d\n", cuda_dev_id);
    printf("sb: %ld\n", (long int)pow(2,sb_bits));
    if(cuda_dev_id == -1) {
        printf("use_schedule: %d\n", use_schedule);
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel
        nthreads = omp_get_num_threads();
#endif
        printf("sk: %ld\n", (long int)pow(2,sk_bits));
        printf("nthreads: %d\n", nthreads);
    }

    // NOTE(review): if -i was never given, fi is NULL when passed to
    // ptiLoadSparseMatrix below; this usage check relies on argc alone.
    if(optind > argc || argc < 2) {
        printf("Usage: %s\n", argv[0]);
        printf("Options: -i INPUT, --input=INPUT\n");
        printf("         -o OUTPUT, --output=OUTPUT\n");
        printf("         -b BLOCKSIZE (bits), --blocksize=BLOCKSIZE (bits)\n");
        printf("         -k SUPERBLOCKSIZE (bits), --kernelsize=SUPERBLOCKSIZE (bits)\n");
        printf("         -R RANK\n");
        printf("         -d CUDA_DEV_ID, --cuda-dev-id=DEV_ID\n");
        printf("         -u use_schedule, --ur=use_schedule\n");
        printf("\n");
        return 1;
    }

    printf("Reading sparse matrix from file (%s) ...",mm_filename);
    fflush(stdout);
    ptiAssert(ptiLoadSparseMatrix(&spA, 1, fi) == 0);
    fclose(fi);
    printf(" done\n");
    ptiSparseMatrixStatus(&spA, stdout);
    // ptiAssert(ptiDumpSparseMatrix(&spA, 0, stdout) == 0);

    /* Convert to HiCOO */
    ptiNnzIndex max_nnzb = 0;
    ptiAssert(ptiSparseMatrixToHiCOO(&hispA, &max_nnzb, &spA, sb_bits, sk_bits) == 0); // TODO
    ptiFreeSparseMatrix(&spA);
    ptiSparseMatrixStatusHiCOO(&hispA, stdout);
    // ptiAssert(ptiDumpSparseMatrixHiCOO(&hispA, stdout) == 0);

    // dense factor B (random) and result C (zeroed)
    ptiNewMatrix(&B, hispA.ncols, R);
    ptiRandomizeMatrix(&B);
    ptiNewMatrix(&C, hispA.nrows, R);
    ptiConstantMatrix(&C, 0);
    // ptiAssert(ptiDumpMatrix(&B, stdout) == 0);
    // ptiAssert(ptiDumpMatrix(&C, stdout) == 0);

    /* determine niters or num_kernel_dim to be parallelized */
    ptiIndex sk = (ptiIndex)pow(2, hispA.sk_bits);
    ptiIndex num_kernel_dim = (hispA.nrows + sk - 1) / sk;
    printf("num_kernel_dim: %u, hispA.nkiters / num_kernel_dim: %u\n", num_kernel_dim, hispA.nkiters/num_kernel_dim);
    // use the reduction (privatized-buffer) variant when there are few kernel
    // rows but many iterations per row
    if(num_kernel_dim <= NUM_CORES && hispA.nkiters / num_kernel_dim >= 20) {
        par_iters = 1;
    }

    /* Set zeros for temporary CBufs */
    char * bytestr;
    if(cuda_dev_id == -1 && par_iters == 1) {
        Cbufs = (ptiMatrix *)malloc(nthreads * sizeof(ptiMatrix));
        for(int t=0; t<nthreads; ++t) {
            ptiAssert(ptiNewMatrix(&Cbufs[t], hispA.nrows, R) == 0);
            ptiAssert(ptiConstantMatrix(&Cbufs[t], 0) == 0);
        }
        ptiNnzIndex bytes = nthreads * hispA.nrows * R * sizeof(ptiValue);
        bytestr = ptiBytesString(bytes);
        printf("MATRIX BUFFER=%s\n\n", bytestr);
        free(bytestr);
    }

    // Warm-up (also selects and announces the kernel variant)
    if(cuda_dev_id == -2) {
        printf("Run ptiSparseMatrixMulMatrixHiCOO:\n");
        ptiSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
    } else if(cuda_dev_id == -1) {
        if(use_schedule == 1) {
            if(par_iters == 0) {
                printf("Run ptiOmpSparseMatrixMulMatrixHiCOO_Schedule:\n");
                ptiOmpSparseMatrixMulMatrixHiCOO_Schedule(&C, &hispA, &B);
            } else {
                printf("Run ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce:\n");
                ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce(&C, Cbufs, &hispA, &B);
            }
        } else {
            printf("Run ptiOmpSparseMatrixMulMatrixHiCOO:\n");
            ptiOmpSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
        }
    }

    // timed loop over the same variant as the warm-up
    ptiStartTimer(timer);
    for(int i=0; i<niters; ++i) {
        if(cuda_dev_id == -2) {
            ptiSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
        } else if(cuda_dev_id == -1) {
            if(use_schedule == 1) {
                if(par_iters == 0) {
                    ptiOmpSparseMatrixMulMatrixHiCOO_Schedule(&C, &hispA, &B);
                } else {
                    ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce(&C, Cbufs, &hispA, &B);
                }
            } else {
                ptiOmpSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
            }
        }
    }
    ptiStopTimer(timer);
    printf("\n");

    // report average time and throughput (2 flops per nonzero per column of B)
    double elapsed_time = ptiPrintAverageElapsedTime(timer, niters, "HiCOO-SpMM");
    ptiNnzIndex flops = 2 * hispA.nnz * R;
    ptiPrintGFLOPS(elapsed_time, flops, "HiCOO-SpMM");

    if(fo != NULL) {
        ptiAssert(ptiDumpMatrix(&C, fo) == 0);
        fclose(fo);
    }

    // cleanup
    if(cuda_dev_id == -1 && par_iters == 1) {
        for(int t=0; t<nthreads; ++t) {
            ptiFreeMatrix(&Cbufs[t]);
        }
        free(Cbufs);
    }
    ptiFreeSparseMatrixHiCOO(&hispA);
    ptiFreeMatrix(&B);
    ptiFreeMatrix(&C);
    ptiFreeTimer(timer);

    return 0;
}
|
boundary_matrix.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/representations/bit_tree_pivot_column.h>
// interface class for the main data structure -- implementations of the interface can be found in ./representations
namespace phat {
// Interface class wrapping a column-store @Representation of a boundary
// matrix; forwards all operations to the representation and adds
// representation-independent statistics and (ascii/binary) I/O.
template< class Representation = bit_tree_pivot_column >
class boundary_matrix
{
protected:
    Representation rep;

// interface functions -- actual implementation and complexity depends on chosen @Representation template
public:
    // get overall number of columns in boundary_matrix
    index get_num_cols() const { return rep._get_num_cols(); }

    // set overall number of columns in boundary_matrix
    void set_num_cols( index nr_of_columns ) { rep._set_num_cols( nr_of_columns ); }

    // get dimension of given index
    dimension get_dim( index idx ) const { return rep._get_dim( idx ); }

    // set dimension of given index
    void set_dim( index idx, dimension dim ) { rep._set_dim( idx, dim ); }

    // replaces content of @col with boundary of given index
    void get_col( index idx, column& col ) const { col.clear(); rep._get_col( idx, col ); }

    // set column @idx to the values contained in @col
    void set_col( index idx, const column& col ) { rep._set_col( idx, col ); }

    // true iff boundary of given column is empty
    bool is_empty( index idx ) const { return rep._is_empty( idx ); }

    // largest index of given column (new name for lowestOne()) -- NOT thread-safe
    index get_max_index( index idx ) const { return rep._get_max_index( idx ); }

    // removes maximal index from given column
    void remove_max( index idx ) { rep._remove_max( idx ); }

    // adds column @source to column @target'
    void add_to( index source, index target ) { rep._add_to( source, target ); }

    // clears given column
    void clear( index idx ) { rep._clear( idx ); }

    // finalizes given column
    void finalize( index idx ) { rep._finalize( idx ); }

    // syncronizes all internal data structures -- has to be called before and after any multithreaded access!
    void sync() { rep._sync(); }

// info functions -- independent of chosen 'Representation'
public:
    // maximal dimension
    dimension get_max_dim() const {
        dimension cur_max_dim = 0;
        for( index idx = 0; idx < get_num_cols(); idx++ )
            cur_max_dim = get_dim( idx ) > cur_max_dim ? get_dim( idx ) : cur_max_dim;
        return cur_max_dim;
    }

    // number of nonzero rows for given column @idx
    index get_num_rows( index idx ) const {
        column cur_col;
        get_col( idx, cur_col );
        return cur_col.size();
    }

    // maximal number of nonzero rows of all columns
    // NOTE(review): initialized to -1 so an all-empty matrix reports -1;
    // relies on `index` being a signed type — confirm in helpers/misc.h.
    index get_max_col_entries() const {
        index max_col_entries = -1;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_col_entries = get_num_rows( idx ) > max_col_entries ? get_num_rows( idx ) : max_col_entries;
        return max_col_entries;
    }

    // maximal number of nonzero cols of all rows
    // (builds an explicit transposed copy, O(total entries) extra memory)
    index get_max_row_entries() const {
        size_t max_row_entries = 0;
        const index nr_of_columns = get_num_cols();
        std::vector< std::vector< index > > transposed_matrix( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            get_col( cur_col, temp_col );
            for( index idx = 0; idx < (index)temp_col.size(); idx++)
                transposed_matrix[ temp_col[ idx ] ].push_back( cur_col );
        }
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_row_entries = transposed_matrix[ idx ].size() > max_row_entries ? transposed_matrix[ idx ].size() : max_row_entries;
        return max_row_entries;
    }

    // overall number of entries in the matrix
    index get_num_entries() const {
        index number_of_nonzero_entries = 0;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            number_of_nonzero_entries += get_num_rows( idx );
        return number_of_nonzero_entries;
    }

// operators / constructors
public:
    boundary_matrix() {};

    // copy-construct from a matrix with a (possibly different) representation
    template< class OtherRepresentation >
    boundary_matrix( const boundary_matrix< OtherRepresentation >& other ) {
        *this = other;
    }

    // equality: same number of columns, and per-column identical content and dimension
    template< typename OtherRepresentation >
    bool operator==( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        const index number_of_columns = this->get_num_cols();

        if( number_of_columns != other_boundary_matrix.get_num_cols() )
            return false;

        column temp_col;
        column other_temp_col;
        for( index idx = 0; idx < number_of_columns; idx++ ) {
            this->get_col( idx, temp_col );
            other_boundary_matrix.get_col( idx, other_temp_col );
            if( temp_col != other_temp_col || this->get_dim( idx ) != other_boundary_matrix.get_dim( idx ) )
                return false;
        }
        return true;
    }

    template< typename OtherRepresentation >
    bool operator!=( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        return !( *this == other_boundary_matrix );
    }

    // cross-representation assignment: copies column-by-column through the interface
    template< typename OtherRepresentation >
    boundary_matrix< Representation >& operator=( const boundary_matrix< OtherRepresentation >& other )
    {
        const index nr_of_columns = other.get_num_cols();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, other.get_dim( cur_col ) );
            other.get_col( cur_col, temp_col );
            this->set_col( cur_col, temp_col );
        }

        // by convention, always return *this
        return *this;
    }

// I/O -- independent of chosen 'Representation'
public:

    // initializes boundary_matrix from (vector<vector>, vector) pair -- untested
    template< typename index_type, typename dimemsion_type >
    void load_vector_vector( const std::vector< std::vector< index_type > >& input_matrix, const std::vector< dimemsion_type >& input_dims ) {
        const index nr_of_columns = (index)input_matrix.size();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        // NOTE(review): parallel set_dim/set_col assumes the representation
        // supports concurrent writes to distinct columns — see sync() contract.
        #pragma omp parallel for private( temp_col )
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, (dimension)input_dims[ cur_col ] );
            index num_rows = input_matrix[ cur_col ].size();
            temp_col.resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                temp_col[ cur_row ] = (index)input_matrix[ cur_col ][ cur_row ];
            this->set_col( cur_col, temp_col );
        }
    }

    // exports the matrix into a (vector<vector>, vector) pair
    template< typename index_type, typename dimemsion_type >
    void save_vector_vector( std::vector< std::vector< index_type > >& output_matrix, std::vector< dimemsion_type >& output_dims ) {
        const index nr_of_columns = get_num_cols();
        output_matrix.resize( nr_of_columns );
        output_dims.resize( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            output_dims[ cur_col ] = (dimemsion_type)get_dim( cur_col );
            get_col( cur_col, temp_col );
            index num_rows = temp_col.size();
            output_matrix[ cur_col ].clear();
            output_matrix[ cur_col ].resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                output_matrix[ cur_col ][ cur_row ] = (index_type)temp_col[ cur_row ];
        }
    }

    // Loads the boundary_matrix from given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column.
    // Ignores empty lines and lines starting with a '#'.
    bool load_ascii( std::string filename ) {
        // first count number of columns:
        std::string cur_line;
        std::ifstream dummy( filename .c_str() );
        if( dummy.fail() )
            return false;

        index number_of_columns = 0;
        while( getline( dummy, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' )
                number_of_columns++;
        }
        this->set_num_cols( number_of_columns );
        dummy.close();

        // second pass: parse dimension + row indices per line
        std::ifstream input_stream( filename.c_str() );
        if( input_stream.fail() )
            return false;

        column temp_col;
        index cur_col = -1;
        while( getline( input_stream, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' ) {
                cur_col++;
                std::stringstream ss( cur_line );

                int64_t temp_dim;
                ss >> temp_dim;
                this->set_dim( cur_col, (dimension) temp_dim );

                int64_t temp_index;
                temp_col.clear();
                // NOTE(review): `ss.good()` can stay true after the last token
                // and push a duplicate of the final index if extraction fails
                // silently; the trailing-whitespace erase above mitigates this,
                // but `while( ss >> temp_index )` would be the robust form.
                while( ss.good() ) {
                    ss >> temp_index;
                    temp_col.push_back( (index)temp_index );
                }
                std::sort( temp_col.begin(), temp_col.end() );
                this->set_col( cur_col, temp_col );
            }
        }

        input_stream.close();
        return true;
    }

    // Saves the boundary_matrix to given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column
    bool save_ascii( std::string filename ) {
        std::ofstream output_stream( filename.c_str() );
        if( output_stream.fail() )
            return false;

        const index nr_columns = this->get_num_cols();
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            output_stream << (int64_t)this->get_dim( cur_col );
            this->get_col( cur_col, tempCol );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size(); cur_row_idx++ )
                output_stream << " " << tempCol[ cur_row_idx ];
            output_stream << std::endl;
        }

        output_stream.close();
        return true;
    }

    // Loads boundary_matrix from given file
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    bool load_binary( std::string filename )
    {
        std::ifstream input_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::in );
        if( input_stream.fail( ) )
            return false;

        // NOTE(review): reads are not checked for stream failure mid-file; a
        // truncated file yields undefined column contents — confirm acceptable.
        int64_t nr_columns;
        input_stream.read( (char*)&nr_columns, sizeof( int64_t ) );
        this->set_num_cols( (index)nr_columns );

        column temp_col;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim;
            input_stream.read( (char*)&cur_dim, sizeof( int64_t ) );
            this->set_dim( cur_col, (dimension)cur_dim );

            int64_t nr_rows;
            input_stream.read( (char*)&nr_rows, sizeof( int64_t ) );
            temp_col.resize( ( std::size_t )nr_rows );
            for( index idx = 0; idx < nr_rows; idx++ ) {
                int64_t cur_row;
                input_stream.read( (char*)&cur_row, sizeof( int64_t ) );
                temp_col[ idx ] = (index)cur_row;
            }
            this->set_col( cur_col, temp_col );
        }

        input_stream.close( );
        return true;
    }

    // Saves the boundary_matrix to given file in binary format
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    bool save_binary( std::string filename )
    {
        std::ofstream output_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::out );
        if( output_stream.fail( ) )
            return false;

        const int64_t nr_columns = this->get_num_cols( );
        output_stream.write( (char*)&nr_columns, sizeof( int64_t ) );
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim = this->get_dim( cur_col );
            output_stream.write( (char*)&cur_dim, sizeof( int64_t ) );
            this->get_col( cur_col, tempCol );
            int64_t cur_nr_rows = tempCol.size( );
            output_stream.write( (char*)&cur_nr_rows, sizeof( int64_t ) );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size( ); cur_row_idx++ ) {
                int64_t cur_row = tempCol[ cur_row_idx ];
                output_stream.write( (char*)&cur_row, sizeof( int64_t ) );
            }
        }

        output_stream.close( );
        return true;
    }
};
}
|
GxB_SelectOp_wait.c | //------------------------------------------------------------------------------
// GxB_SelectOp_wait: wait for a user-defined GxB_SelectOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GxB_SelectOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
// A user-defined GxB_SelectOp never has pending work, so "waiting" is just a
// validity check on the op followed by an OpenMP memory flush.  The parameter
// list differs by library major version (pointer-to-op pre-v6; op + waitmode
// from v6 on), selected at compile time.
GrB_Info GxB_SelectOp_wait   // no work, just check if the GxB_SelectOp is valid
(
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
    GxB_SelectOp *op
#else
    GxB_SelectOp op,
    GrB_WaitMode waitmode
#endif
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

#if (GxB_IMPLEMENTATION_MAJOR <= 5)
    GB_WHERE1 ("GxB_SelectOp_wait (&op)") ;
    GB_RETURN_IF_NULL (op) ;
    GB_RETURN_IF_NULL_OR_FAULTY (*op) ;
#else
    GB_WHERE1 ("GxB_SelectOp_wait (op, waitmode)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (op) ;
#endif

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // make any prior writes to the op visible to all threads
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
find.h | // -*- C++ -*-
// Copyright (C) 2007, 2008 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING. If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.
// As a special exception, you may use this file as part of a free
// software library without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.
/** @file parallel/find.h
* @brief Parallel implementation base for std::find(), std::equal()
* and related functions.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze and Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_FIND_H
#define _GLIBCXX_PARALLEL_FIND_H 1
#include <bits/stl_algobase.h>
#include <parallel/features.h>
#include <parallel/parallel.h>
#include <parallel/compatibility.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Parallel std::find, switch for different algorithms.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Must have same
* length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e. g. std::find_if (), std::equal(),...)
* @return Place of finding in both sequences.
*/
// Dispatch to the configured parallel find variant (growing blocks,
// constant-size blocks, or equal split) based on runtime _Settings.
template<typename RandomAccessIterator1,
         typename RandomAccessIterator2,
         typename Pred,
         typename Selector>
inline std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
              RandomAccessIterator2 begin2, Pred pred, Selector selector)
{
    switch (_Settings::get().find_algorithm)
    {
    case GROWING_BLOCKS:
        return find_template(begin1, end1, begin2, pred, selector,
                             growing_blocks_tag());
    case CONSTANT_SIZE_BLOCKS:
        return find_template(begin1, end1, begin2, pred, selector,
                             constant_size_blocks_tag());
    case EQUAL_SPLIT:
        return find_template(begin1, end1, begin2, pred, selector,
                             equal_split_tag());
    default:
        // unreachable for valid settings; return "not found at begin" after assert
        _GLIBCXX_PARALLEL_ASSERT(false);
        return std::make_pair(begin1, begin2);
    }
}
#if _GLIBCXX_FIND_EQUAL_SPLIT
/**
* @brief Parallel std::find, equal splitting variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e. g. std::find_if (), std::equal(),...)
* @return Place of finding in both sequences.
*/
// Equal-split parallel find: the range is split into one contiguous chunk per
// thread; each thread scans its chunk, publishing the smallest matching
// position into `result` under a lock and polling `result` (via flush) to
// abandon its chunk early once a lower-position match is known.
template<typename RandomAccessIterator1,
         typename RandomAccessIterator2,
         typename Pred,
         typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1,
              RandomAccessIterator1 end1,
              RandomAccessIterator2 begin2,
              Pred pred,
              Selector selector,
              equal_split_tag)
{
    _GLIBCXX_CALL(end1 - begin1)

    typedef std::iterator_traits<RandomAccessIterator1> traits_type;
    typedef typename traits_type::difference_type difference_type;
    typedef typename traits_type::value_type value_type;

    difference_type length = end1 - begin1;
    // sentinel: `length` means "no match found"
    difference_type result = length;
    difference_type* borders;

    omp_lock_t result_lock;
    omp_init_lock(&result_lock);

    thread_index_t num_threads = get_max_threads();
#   pragma omp parallel num_threads(num_threads)
    {
#       pragma omp single
        {
            // one thread computes the chunk borders; the implicit barrier at the
            // end of `single` makes `borders` visible to all threads before use
            num_threads = omp_get_num_threads();
            borders = new difference_type[num_threads + 1];
            equally_split(length, num_threads, borders);
        } //single

        thread_index_t iam = omp_get_thread_num();
        difference_type start = borders[iam], stop = borders[iam + 1];

        RandomAccessIterator1 i1 = begin1 + start;
        RandomAccessIterator2 i2 = begin2 + start;
        for (difference_type pos = start; pos < stop; ++pos)
        {
#           pragma omp flush(result)
            // Result has been set to something lower.
            if (result < pos)
                break;

            if (selector(i1, i2, pred))
            {
                // publish this match if it is the lowest seen so far
                omp_set_lock(&result_lock);
                if (pos < result)
                    result = pos;
                omp_unset_lock(&result_lock);
                break;
            }
            ++i1;
            ++i2;
        }
    } //parallel

    omp_destroy_lock(&result_lock);
    delete[] borders;
    return
        std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
                                                                begin2 + result);
}
#endif
#if _GLIBCXX_FIND_GROWING_BLOCKS
/**
* @brief Parallel std::find, growing block size variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e. g. std::find_if (), std::equal(),...)
* @return Place of finding in both sequences.
* @see __gnu_parallel::_Settings::find_sequential_search_size
* @see __gnu_parallel::_Settings::find_initial_block_size
* @see __gnu_parallel::_Settings::find_maximum_block_size
* @see __gnu_parallel::_Settings::find_increasing_factor
*
* There are two main differences between the growing blocks and
* the constant-size blocks variants.
* 1. For GB, the block size grows; for CSB, the block size is fixed.
* 2. For GB, the blocks are allocated dynamically;
* for CSB, the blocks are allocated in a predetermined manner,
* namely spacial round-robin.
*/
// Growing-blocks parallel find: after an initial sequential probe of the
// first find_sequential_search_size elements, threads repeatedly grab the
// next free block via fetch_and_add on 'next_block_start'; block sizes grow
// geometrically (find_increasing_factor) up to find_maximum_block_size.
// The leftmost match is published into 'result' under a lock, and the block
// dispenser is pushed past 'length' so no further blocks are handed out.
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
RandomAccessIterator2 begin2, Pred pred, Selector selector,
growing_blocks_tag)
{
_GLIBCXX_CALL(end1 - begin1)
typedef std::iterator_traits<RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
const _Settings& __s = _Settings::get();
difference_type length = end1 - begin1;
difference_type sequential_search_size =
std::min<difference_type>(length, __s.find_sequential_search_size);
// Try it sequentially first.
std::pair<RandomAccessIterator1, RandomAccessIterator2> find_seq_result =
selector.sequential_algorithm(
begin1, begin1 + sequential_search_size, begin2, pred);
if (find_seq_result.first != (begin1 + sequential_search_size))
return find_seq_result;
// Index of beginning of next free block (after sequential find).
difference_type next_block_start = sequential_search_size;
// 'length' is the "not found" sentinel.
difference_type result = length;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
thread_index_t num_threads = get_max_threads();
# pragma omp parallel shared(result) num_threads(num_threads)
{
# pragma omp single
num_threads = omp_get_num_threads();
// Not within first k elements -> start parallel.
// (Block assignment is purely dynamic, so the thread id is not needed;
// the original code fetched omp_get_thread_num() into an unused local.)
difference_type block_size = __s.find_initial_block_size;
difference_type start =
fetch_and_add<difference_type>(&next_block_start, block_size);
// Get new block, update pointer to next block.
difference_type stop =
std::min<difference_type>(length, start + block_size);
std::pair<RandomAccessIterator1, RandomAccessIterator2> local_result;
while (start < length)
{
# pragma omp flush(result)
// Get new value of result.
if (result < start)
{
// No chance to find first element.
break;
}
local_result = selector.sequential_algorithm(
begin1 + start, begin1 + stop, begin2 + start, pred);
if (local_result.first != (begin1 + stop))
{
omp_set_lock(&result_lock);
if ((local_result.first - begin1) < result)
{
result = local_result.first - begin1;
// Result cannot be in future blocks, stop algorithm.
fetch_and_add<difference_type>(&next_block_start, length);
}
omp_unset_lock(&result_lock);
}
block_size =
std::min<difference_type>(block_size * __s.find_increasing_factor,
__s.find_maximum_block_size);
// Get new block, update pointer to next block.
start =
fetch_and_add<difference_type>(&next_block_start, block_size);
stop = ((length < (start + block_size))
? length : (start + block_size));
}
} //parallel
omp_destroy_lock(&result_lock);
// Return iterator on found element.
return
std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
begin2 + result);
}
#endif
#if _GLIBCXX_FIND_CONSTANT_SIZE_BLOCKS
/**
* @brief Parallel std::find, constant block size variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e. g. std::find_if (), std::equal(),...)
* @return Place of finding in both sequences.
* @see __gnu_parallel::_Settings::find_sequential_search_size
* @see __gnu_parallel::_Settings::find_block_size
* There are two main differences between the growing blocks and the
* constant-size blocks variants.
* 1. For GB, the block size grows; for CSB, the block size is fixed.
* 2. For GB, the blocks are allocated dynamically; for CSB, the
* blocks are allocated in a predetermined manner, namely spacial
* round-robin.
*/
// Constant-size-blocks parallel find: after the sequential probe, blocks of a
// fixed size are assigned in a predetermined round-robin fashion: thread iam
// always works on block (iteration * num_threads + iam). No shared block
// dispenser is needed; each thread advances its own iteration_start.
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
RandomAccessIterator2 begin2, Pred pred, Selector selector,
constant_size_blocks_tag)
{
_GLIBCXX_CALL(end1 - begin1)
typedef std::iterator_traits<RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
const _Settings& __s = _Settings::get();
difference_type length = end1 - begin1;
difference_type sequential_search_size = std::min<difference_type>(
length, __s.find_sequential_search_size);
// Try it sequentially first.
std::pair<RandomAccessIterator1, RandomAccessIterator2> find_seq_result =
selector.sequential_algorithm(begin1, begin1 + sequential_search_size,
begin2, pred);
if (find_seq_result.first != (begin1 + sequential_search_size))
return find_seq_result;
// 'length' is the "not found" sentinel.
difference_type result = length;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
// Not within first sequential_search_size elements -> start parallel.
thread_index_t num_threads = get_max_threads();
# pragma omp parallel shared(result) num_threads(num_threads)
{
# pragma omp single
num_threads = omp_get_num_threads();
thread_index_t iam = omp_get_thread_num();
// NOTE(review): the doc comment above cites find_block_size, but the code
// reads find_initial_block_size -- verify which setting is intended.
difference_type block_size = __s.find_initial_block_size;
// First element of thread's current iteration.
difference_type iteration_start = sequential_search_size;
// Where to work (initialization).
difference_type start = iteration_start + iam * block_size;
difference_type stop =
std::min<difference_type>(length, start + block_size);
std::pair<RandomAccessIterator1, RandomAccessIterator2> local_result;
while (start < length)
{
// Get new value of result.
# pragma omp flush(result)
// No chance to find first element.
if (result < start)
break;
local_result = selector.sequential_algorithm(
begin1 + start, begin1 + stop,
begin2 + start, pred);
if (local_result.first != (begin1 + stop))
{
// Publish the leftmost match found so far under the lock.
omp_set_lock(&result_lock);
if ((local_result.first - begin1) < result)
result = local_result.first - begin1;
omp_unset_lock(&result_lock);
// Will not find better value in its interval.
break;
}
// Advance to this thread's block in the next round-robin iteration.
iteration_start += num_threads * block_size;
// Where to work.
start = iteration_start + iam * block_size;
stop = std::min<difference_type>(length, start + block_size);
}
} //parallel
omp_destroy_lock(&result_lock);
// Return iterator on found element.
return
std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
begin2 + result);
}
#endif
} // end namespace
#endif
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp_interface.h"
#include <omp.h>
extern int __htc_get_unit_count();
extern int global_radius;
/*
 * app_main: smoke test for OpenMP target offload with teams/distribute.
 * Fills a device-allocated buffer with omp_get_team_num()*omp_get_thread_num()
 * per element, then sums it on the host and compares against the expected
 * checksum 10080 (fixed for bufsize=1000, 8 teams, schedule(static,10)).
 * Returns 0 on completion (PASS/FAIL is reported via stdout), 1 on
 * allocation failure.
 */
int app_main(int argc, char **argv) {
    uint32_t bufsize = 1000;
    // Allocate target temp buffer.
    extern void *stencil_cp_alloc(size_t);
    uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t));
    // Guard against allocation failure before the target region writes
    // through the pointer (the original dereferenced it unchecked).
    if (unew == NULL) {
        fprintf(stderr, "stencil_cp_alloc failed for %u bytes\n",
                (unsigned)bufsize);
        return 1;
    }
    printf("unit count is %d\n", __htc_get_unit_count());
    int i;
    #pragma omp target
    #pragma omp teams num_teams(8)
    #pragma omp distribute parallel for schedule(static,10)
    for (i = 0; i < bufsize; i++) {
        printf("team %d thread %d i is %d\n", (int)omp_get_team_num(),
               (int)omp_get_thread_num(), i);
        unew[i] = omp_get_team_num() * omp_get_thread_num();
    }
    // Host-side reduction over the buffer the device filled.
    int sum = 0;
    for (i = 0; i < bufsize; i++) {
        // printf("i = %d val = %d\n", i, unew[i]);
        sum += unew[i];
    }
    printf("sum is %d %s\n", sum, (sum == 10080) ? "PASSED" : "FAILED");
    return 0;
}
|
2Dfold.c | /*
minimum free energy
RNA secondary structure with
basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "fold.h"
#include "pair_mat.h"
#include "loop_energies.h"
#include "mm.h"
#include "params.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "2Dfold.h"
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
int compute_2Dfold_F3 = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void mfe_linear(TwoDfold_vars *vars);
PRIVATE void mfe_circ(TwoDfold_vars *vars);
PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars);
PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars);
PRIVATE void make_ptypes(TwoDfold_vars *vars);
PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_fc(int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real);
INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l);
INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
 * Allocate and initialize a TwoDfold_vars working set for sequence 'seq'
 * with the two reference structures (dot-bracket) and circularity flag.
 * Ownership of the returned struct passes to the caller; release it with
 * destroy_TwoDfold_variables(). space() is the library allocator (aborts
 * on failure), so individual allocations are not NULL-checked here.
 */
PUBLIC TwoDfold_vars *get_TwoDfold_variables(const char *seq, const char *structure1, const char *structure2, int circ){
unsigned int size, length, i;
int *index;
TwoDfold_vars *vars;
length = strlen(seq);
vars = (TwoDfold_vars *)malloc(sizeof(TwoDfold_vars));
vars->sequence = (char *)space(length + 1);
strcpy(vars->sequence, seq);
vars->seq_length = length;
if(vars->seq_length < 1) nrerror("get_TwoDfold_variables: sequence must be longer than 0");
/* size of a triangular (i<=j) pair-index matrix */
size = ((length + 1) * (length + 2)/2);
vars->reference_pt1 = make_pair_table(structure1);
vars->reference_pt2 = make_pair_table(structure2);
vars->referenceBPs1 = make_referenceBP_array(vars->reference_pt1, TURN);
vars->referenceBPs2 = make_referenceBP_array(vars->reference_pt2, TURN);
vars->bpdist = compute_BPdifferences(vars->reference_pt1, vars->reference_pt2, TURN);
vars->do_backtrack = 1;
/* snapshot the current global model settings */
vars->dangles = dangles;
vars->circ = circ;
vars->temperature = temperature;
vars->ptype = space(sizeof(char) * size);
vars->P = NULL;
vars->S = NULL;
vars->S1 = NULL;
vars->my_iindx = get_iindx(length);
index = vars->my_iindx;
/* compute maximum matching with reference structure 1 disallowed */
vars->mm1 = maximumMatchingConstraint(vars->sequence, vars->reference_pt1);
/* compute maximum matching with reference structure 2 disallowed */
vars->mm2 = maximumMatchingConstraint(vars->sequence, vars->reference_pt2);
/* upper bounds for the base-pair distances to each reference */
vars->maxD1 = vars->mm1[index[1]-length] + vars->referenceBPs1[index[1]-length];
vars->maxD2 = vars->mm2[index[1]-length] + vars->referenceBPs2[index[1]-length];
/* allocate memory for the energy matrices and min-/max-index helper arrays */
vars->E_C = (int ***) space(sizeof(int **) * size);
vars->l_min_values = (int **) space(sizeof(int *) * size);
vars->l_max_values = (int **) space(sizeof(int *) * size);
vars->k_min_values = (int *) space(sizeof(int) * size);
vars->k_max_values = (int *) space(sizeof(int) * size);
vars->E_F5 = (int ***) space(sizeof(int **) * (length + 1));
vars->l_min_values_f = (int **) space(sizeof(int *) * (length + 1));
vars->l_max_values_f = (int **) space(sizeof(int *) * (length + 1));
vars->k_min_values_f = (int *) space(sizeof(int) * (length + 1));
vars->k_max_values_f = (int *) space(sizeof(int) * (length + 1));
/* F3 matrices only when the global switch is set */
if(compute_2Dfold_F3){
vars->E_F3 = (int ***) space(sizeof(int **) * (length + 1));
vars->l_min_values_f3 = (int **) space(sizeof(int *) * (length + 1));
vars->l_max_values_f3 = (int **) space(sizeof(int *) * (length + 1));
vars->k_min_values_f3 = (int *) space(sizeof(int) * (length + 1));
vars->k_max_values_f3 = (int *) space(sizeof(int) * (length + 1));
}
else vars->E_F3 = NULL;
vars->E_M = (int ***) space(sizeof(int **) * size);
vars->l_min_values_m = (int **) space(sizeof(int *) * size);
vars->l_max_values_m = (int **) space(sizeof(int *) * size);
vars->k_min_values_m = (int *) space(sizeof(int) * size);
vars->k_max_values_m = (int *) space(sizeof(int) * size);
vars->E_M1 = (int ***) space(sizeof(int **) * size);
vars->l_min_values_m1 = (int **) space(sizeof(int *) * size);
vars->l_max_values_m1 = (int **) space(sizeof(int *) * size);
vars->k_min_values_m1 = (int *) space(sizeof(int) * size);
vars->k_max_values_m1 = (int *) space(sizeof(int) * size);
#ifdef COUNT_STATES
vars->N_C = (unsigned long ***) space(sizeof(unsigned long **) * size);
vars->N_F5 = (unsigned long ***) space(sizeof(unsigned long **) * (length + 1));
vars->N_M = (unsigned long ***) space(sizeof(unsigned long **) * size);
vars->N_M1 = (unsigned long ***) space(sizeof(unsigned long **) * size);
#endif
/* M2 matrices are only needed for circular RNA folding */
if(circ){
vars->E_M2_rem = (int *) space(sizeof(int) * (length + 1));
vars->E_M2 = (int ***) space(sizeof(int **) * (length + 1));
vars->l_min_values_m2 = (int **) space(sizeof(int *) * (length + 1));
vars->l_max_values_m2 = (int **) space(sizeof(int *) * (length + 1));
vars->k_min_values_m2 = (int *) space(sizeof(int) * (length + 1));
vars->k_max_values_m2 = (int *) space(sizeof(int) * (length + 1));
}
else{
vars->E_M2_rem = NULL;
vars->E_M2 = NULL;
vars->l_min_values_m2 = NULL;
vars->l_max_values_m2 = NULL;
vars->k_min_values_m2 = NULL;
vars->k_max_values_m2 = NULL;
}
vars->E_Fc = NULL;
vars->E_FcH = NULL;
vars->E_FcI = NULL;
vars->E_FcM = NULL;
/* "_rem" slots collect states outside the [0..maxD] distance window */
vars->E_Fc_rem = INF;
vars->E_FcH_rem = INF;
vars->E_FcI_rem = INF;
vars->E_FcM_rem = INF;
vars->E_C_rem = (int *) space(sizeof(int) * size);
vars->E_M_rem = (int *) space(sizeof(int) * size);
vars->E_M1_rem = (int *) space(sizeof(int) * size);
vars->E_F5_rem = (int *) space(sizeof(int) * (length+1));
/* init rest arrays */
for(i=0;i<size;i++){
vars->E_C_rem[i] = vars->E_M_rem[i] = vars->E_M1_rem[i] = INF;
}
for(i=0;i<=length;i++)
vars->E_F5_rem[i] = INF;
if(vars->E_M2_rem)
for(i=0;i<=length;i++)
vars->E_M2_rem[i] = INF;
return vars;
}
/*
 * Free everything allocated by get_TwoDfold_variables() (and by the DP fill).
 * The distance-indexed arrays were stored with their base pointer shifted by
 * -k_min (and rows by -l_min/2) so they can be indexed directly by distance;
 * before each free() the pointer is shifted back (ptr += k_min) to recover
 * the original allocation base. Under OpenMP the independent matrix groups
 * are released in parallel via orphaned 'omp sections'.
 */
PUBLIC void destroy_TwoDfold_variables(TwoDfold_vars *vars){
unsigned int i, j, ij;
int cnt1;
if(vars == NULL) return;
/* flat "remainder" arrays first */
free(vars->E_C_rem);
free(vars->E_M_rem);
free(vars->E_M1_rem);
free(vars->E_F5_rem);
if(vars->E_M2_rem) free(vars->E_M2_rem);
#ifdef _OPENMP
#pragma omp sections private(i,j,ij,cnt1)
{
#pragma omp section
{
#endif
#ifdef COUNT_STATES
if(vars->N_C != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->N_C[ij]) continue;
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
if(vars->l_min_values[ij][cnt1] < INF){
/* un-shift row base (rows are indexed by l/2) before freeing */
vars->N_C[ij][cnt1] += vars->l_min_values[ij][cnt1]/2;
free(vars->N_C[ij][cnt1]);
}
if(vars->k_min_values[ij] < INF){
vars->N_C[ij] += vars->k_min_values[ij];
free(vars->N_C[ij]);
}
}
}
free(vars->N_C);
}
#endif
/* E_C: closed-pair energies plus their k/l boundary arrays */
if(vars->E_C != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->E_C[ij]) continue;
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
if(vars->l_min_values[ij][cnt1] < INF){
vars->E_C[ij][cnt1] += vars->l_min_values[ij][cnt1]/2;
free(vars->E_C[ij][cnt1]);
}
if(vars->k_min_values[ij] < INF){
vars->E_C[ij] += vars->k_min_values[ij];
free(vars->E_C[ij]);
vars->l_min_values[ij] += vars->k_min_values[ij];
vars->l_max_values[ij] += vars->k_min_values[ij];
free(vars->l_min_values[ij]);
free(vars->l_max_values[ij]);
}
}
}
free(vars->E_C);
free(vars->l_min_values);
free(vars->l_max_values);
free(vars->k_min_values);
free(vars->k_max_values);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
#ifdef COUNT_STATES
if(vars->N_M != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->N_M[ij]) continue;
for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++)
if(vars->l_min_values_m[ij][cnt1] < INF){
vars->N_M[ij][cnt1] += vars->l_min_values_m[ij][cnt1]/2;
free(vars->N_M[ij][cnt1]);
}
if(vars->k_min_values_m[ij] < INF){
vars->N_M[ij] += vars->k_min_values_m[ij];
free(vars->N_M[ij]);
}
}
}
free(vars->N_M);
}
#endif
/* E_M: multiloop energies */
if(vars->E_M != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->E_M[ij]) continue;
for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++)
if(vars->l_min_values_m[ij][cnt1] < INF){
vars->E_M[ij][cnt1] += vars->l_min_values_m[ij][cnt1]/2;
free(vars->E_M[ij][cnt1]);
}
if(vars->k_min_values_m[ij] < INF){
vars->E_M[ij] += vars->k_min_values_m[ij];
free(vars->E_M[ij]);
vars->l_min_values_m[ij] += vars->k_min_values_m[ij];
vars->l_max_values_m[ij] += vars->k_min_values_m[ij];
free(vars->l_min_values_m[ij]);
free(vars->l_max_values_m[ij]);
}
}
}
free(vars->E_M);
free(vars->l_min_values_m);
free(vars->l_max_values_m);
free(vars->k_min_values_m);
free(vars->k_max_values_m);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
#ifdef COUNT_STATES
if(vars->N_M1 != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->N_M1[ij]) continue;
for(cnt1 = vars->k_min_values_m1[ij]; cnt1 <= vars->k_max_values_m1[ij]; cnt1++)
if(vars->l_min_values_m1[ij][cnt1] < INF){
vars->N_M1[ij][cnt1] += vars->l_min_values_m1[ij][cnt1]/2;
free(vars->N_M1[ij][cnt1]);
}
if(vars->k_min_values_m1[ij] < INF){
vars->N_M1[ij] += vars->k_min_values_m1[ij];
free(vars->N_M1[ij]);
}
}
}
free(vars->N_M1);
}
#endif
/* E_M1: multiloop components with exactly one branch */
if(vars->E_M1 != NULL){
for(i = 1; i < vars->seq_length; i++){
for(j = i; j <= vars->seq_length; j++){
ij = vars->my_iindx[i] - j;
if(!vars->E_M1[ij]) continue;
for(cnt1 = vars->k_min_values_m1[ij]; cnt1 <= vars->k_max_values_m1[ij]; cnt1++)
if(vars->l_min_values_m1[ij][cnt1] < INF){
vars->E_M1[ij][cnt1] += vars->l_min_values_m1[ij][cnt1]/2;
free(vars->E_M1[ij][cnt1]);
}
if(vars->k_min_values_m1[ij] < INF){
vars->E_M1[ij] += vars->k_min_values_m1[ij];
free(vars->E_M1[ij]);
vars->l_min_values_m1[ij] += vars->k_min_values_m1[ij];
vars->l_max_values_m1[ij] += vars->k_min_values_m1[ij];
free(vars->l_min_values_m1[ij]);
free(vars->l_max_values_m1[ij]);
}
}
}
free(vars->E_M1);
free(vars->l_min_values_m1);
free(vars->l_max_values_m1);
free(vars->k_min_values_m1);
free(vars->k_max_values_m1);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
/* E_M2 exists only for circular folding (indexed by i, not ij) */
if(vars->E_M2 != NULL){
for(i = 1; i < vars->seq_length-TURN-1; i++){
if(!vars->E_M2[i]) continue;
for(cnt1 = vars->k_min_values_m2[i]; cnt1 <= vars->k_max_values_m2[i]; cnt1++)
if(vars->l_min_values_m2[i][cnt1] < INF){
vars->E_M2[i][cnt1] += vars->l_min_values_m2[i][cnt1]/2;
free(vars->E_M2[i][cnt1]);
}
if(vars->k_min_values_m2[i] < INF){
vars->E_M2[i] += vars->k_min_values_m2[i];
free(vars->E_M2[i]);
vars->l_min_values_m2[i] += vars->k_min_values_m2[i];
vars->l_max_values_m2[i] += vars->k_min_values_m2[i];
free(vars->l_min_values_m2[i]);
free(vars->l_max_values_m2[i]);
}
}
free(vars->E_M2);
free(vars->l_min_values_m2);
free(vars->l_max_values_m2);
free(vars->k_min_values_m2);
free(vars->k_max_values_m2);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
#ifdef COUNT_STATES
if(vars->N_F5 != NULL){
for(i = 1; i <= vars->seq_length; i++){
if(!vars->N_F5[i]) continue;
for(cnt1 = vars->k_min_values_f[i]; cnt1 <= vars->k_max_values_f[i]; cnt1++)
if(vars->l_min_values_f[i][cnt1] < INF){
vars->N_F5[i][cnt1] += vars->l_min_values_f[i][cnt1]/2;
free(vars->N_F5[i][cnt1]);
}
if(vars->k_min_values_f[i] < INF){
vars->N_F5[i] += vars->k_min_values_f[i];
free(vars->N_F5[i]);
}
}
free(vars->N_F5);
}
#endif
/* E_F5: external loop, 5' fragment */
if(vars->E_F5 != NULL){
for(i = 1; i <= vars->seq_length; i++){
if(!vars->E_F5[i]) continue;
for(cnt1 = vars->k_min_values_f[i]; cnt1 <= vars->k_max_values_f[i]; cnt1++)
if(vars->l_min_values_f[i][cnt1] < INF){
vars->E_F5[i][cnt1] += vars->l_min_values_f[i][cnt1]/2;
free(vars->E_F5[i][cnt1]);
}
if(vars->k_min_values_f[i] < INF){
vars->E_F5[i] += vars->k_min_values_f[i];
free(vars->E_F5[i]);
vars->l_min_values_f[i] += vars->k_min_values_f[i];
vars->l_max_values_f[i] += vars->k_min_values_f[i];
free(vars->l_min_values_f[i]);
free(vars->l_max_values_f[i]);
}
}
free(vars->E_F5);
free(vars->l_min_values_f);
free(vars->l_max_values_f);
free(vars->k_min_values_f);
free(vars->k_max_values_f);
}
/* E_F3: only allocated when compute_2Dfold_F3 was set */
if(vars->E_F3 != NULL){
for(i = 1; i <= vars->seq_length; i++){
if(!vars->E_F3[i]) continue;
for(cnt1 = vars->k_min_values_f3[i]; cnt1 <= vars->k_max_values_f3[i]; cnt1++)
if(vars->l_min_values_f3[i][cnt1] < INF){
vars->E_F3[i][cnt1] += vars->l_min_values_f3[i][cnt1]/2;
free(vars->E_F3[i][cnt1]);
}
if(vars->k_min_values_f3[i] < INF){
vars->E_F3[i] += vars->k_min_values_f3[i];
free(vars->E_F3[i]);
vars->l_min_values_f3[i] += vars->k_min_values_f3[i];
vars->l_max_values_f3[i] += vars->k_min_values_f3[i];
free(vars->l_min_values_f3[i]);
free(vars->l_max_values_f3[i]);
}
}
free(vars->E_F3);
free(vars->l_min_values_f3);
free(vars->l_max_values_f3);
free(vars->k_min_values_f3);
free(vars->k_max_values_f3);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
/* circular-fold totals: E_Fc (and H/I/M variants below) are 2D, no ij index */
if(vars->E_Fc != NULL){
for(cnt1 = vars->k_min_values_fc; cnt1 <= vars->k_max_values_fc; cnt1++)
if(vars->l_min_values_fc[cnt1] < INF){
vars->E_Fc[cnt1] += vars->l_min_values_fc[cnt1]/2;
free(vars->E_Fc[cnt1]);
}
if(vars->k_min_values_fc < INF){
vars->E_Fc += vars->k_min_values_fc;
free(vars->E_Fc);
vars->l_min_values_fc += vars->k_min_values_fc;
vars->l_max_values_fc += vars->k_min_values_fc;
free(vars->l_min_values_fc);
free(vars->l_max_values_fc);
}
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if(vars->E_FcI != NULL){
for(cnt1 = vars->k_min_values_fcI; cnt1 <= vars->k_max_values_fcI; cnt1++)
if(vars->l_min_values_fcI[cnt1] < INF){
vars->E_FcI[cnt1] += vars->l_min_values_fcI[cnt1]/2;
free(vars->E_FcI[cnt1]);
}
if(vars->k_min_values_fcI < INF){
vars->E_FcI += vars->k_min_values_fcI;
free(vars->E_FcI);
vars->l_min_values_fcI += vars->k_min_values_fcI;
vars->l_max_values_fcI += vars->k_min_values_fcI;
free(vars->l_min_values_fcI);
free(vars->l_max_values_fcI);
}
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if(vars->E_FcH != NULL){
for(cnt1 = vars->k_min_values_fcH; cnt1 <= vars->k_max_values_fcH; cnt1++)
if(vars->l_min_values_fcH[cnt1] < INF){
vars->E_FcH[cnt1] += vars->l_min_values_fcH[cnt1]/2;
free(vars->E_FcH[cnt1]);
}
if(vars->k_min_values_fcH < INF){
vars->E_FcH += vars->k_min_values_fcH;
free(vars->E_FcH);
vars->l_min_values_fcH += vars->k_min_values_fcH;
vars->l_max_values_fcH += vars->k_min_values_fcH;
free(vars->l_min_values_fcH);
free(vars->l_max_values_fcH);
}
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if(vars->E_FcM != NULL){
for(cnt1 = vars->k_min_values_fcM; cnt1 <= vars->k_max_values_fcM; cnt1++)
if(vars->l_min_values_fcM[cnt1] < INF){
vars->E_FcM[cnt1] += vars->l_min_values_fcM[cnt1]/2;
free(vars->E_FcM[cnt1]);
}
if(vars->k_min_values_fcM < INF){
vars->E_FcM += vars->k_min_values_fcM;
free(vars->E_FcM);
vars->l_min_values_fcM += vars->k_min_values_fcM;
vars->l_max_values_fcM += vars->k_min_values_fcM;
free(vars->l_min_values_fcM);
free(vars->l_max_values_fcM);
}
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
/* remaining simple members */
if(vars->P != NULL) free(vars->P);
if(vars->sequence != NULL) free(vars->sequence);
if(vars->reference_pt1 != NULL) free(vars->reference_pt1);
if(vars->reference_pt2 != NULL) free(vars->reference_pt2);
if(vars->referenceBPs1 != NULL) free(vars->referenceBPs1);
if(vars->referenceBPs2 != NULL) free(vars->referenceBPs2);
if(vars->ptype != NULL) free(vars->ptype);
if(vars->S != NULL) free(vars->S);
if(vars->S1 != NULL) free(vars->S1);
if(vars->mm1 != NULL) free(vars->mm1);
if(vars->mm2 != NULL) free(vars->mm2);
if(vars->bpdist != NULL) free(vars->bpdist);
#ifdef _OPENMP
}
}
#endif
/* freed after the sections join: still needed by the ij loops above */
if(vars->my_iindx != NULL) free(vars->my_iindx);
free(vars);
}
/*
 * (Re)initialize the energy parameter set stored in 'vars' and the global
 * parameters of the ViennaRNA fold module. Call order matters: both calls
 * mutate global state, so this must not run concurrently with folding.
 */
PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars){
update_TwoDfold_params(vars);
/* this call updates the params in the ViennaRNA fold.o which is a global, so be careful
 * whith calling it parallel... need a workarround or fix of ViennaRNA fold stuff
 */
update_fold_params();
}
/*
 * Compute the 2D MFE landscape: for every base-pair distance pair (d1,d2)
 * to the two references (optionally capped by distance1/distance2; pass a
 * negative value for "no cap") return the minimum free energy and, when
 * vars->do_backtrack is set, one MFE structure. The result is a
 * (maxD1+1) x (maxD2+1) matrix owned by the caller; entries with no state
 * keep en == INF/100 and s == NULL.
 * NOTE(review): vars->S / vars->S1 are overwritten unconditionally here --
 * calling this twice on the same vars appears to leak the old arrays; verify.
 */
PUBLIC TwoDfold_solution **TwoDfold(TwoDfold_vars *vars, int distance1, int distance2){
unsigned int i, d1, d2;
unsigned int maxD1;
unsigned int maxD2;
unsigned int length;
TwoDfold_solution **output;
initialize_TwoDfold_vars(vars);
/* refresh parameters if the global temperature changed since creation */
if(fabs(vars->P->temperature - temperature)>1e-6) update_TwoDfold_params(vars);
vars->S = encode_sequence(vars->sequence, 0);
vars->S1 = encode_sequence(vars->sequence, 1);
make_ptypes(vars);
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
/* clamp user-requested distance caps to the theoretical maxima */
if(distance1 >= 0){
if((unsigned int)distance1 > maxD1)
fprintf(stderr,
"limiting maximum basepair distance 1 to %u\n",
maxD1);
else
maxD1 = (unsigned int)distance1;
}
if(distance2 >= 0){
if((unsigned int)distance2 > maxD2)
fprintf(stderr,
"limiting maximum basepair distance 2 to %u\n",
maxD2);
else
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
output = (TwoDfold_solution **)space((vars->maxD1+1) * sizeof(TwoDfold_solution *));
/* fill the DP matrices (plus circular post-processing if requested) */
mfe_linear(vars);
if(vars->circ) mfe_circ(vars);
length = vars->seq_length;
for(d1=0; d1<=maxD1;d1++){
output[d1] = (TwoDfold_solution *)space((vars->maxD2+1)*sizeof(TwoDfold_solution));
#ifdef _OPENMP
#pragma omp parallel for private(d2)
#endif
/* default every cell to "no state": en = INF/100, no structure */
for(d2=0; d2<=maxD2;d2++){
output[d1][d2].en = (float)INF/(float)100.;
output[d1][d2].s = NULL;
}
/* valid d2 values exist only inside the stored [l_min,l_max] window,
 * and only every second value (d1+d2 parity is fixed), hence d2 += 2 */
if( (d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length]))
&& (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))){
#ifdef _OPENMP
#pragma omp parallel for private(d2, i)
#endif
for( d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]);
d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]);
d2 += 2){
output[d1][d2].en = (float)((vars->circ) ? vars->E_Fc[d1][d2/2] : vars->E_F5[length][d1][d2/2])/(float)100.;
if(vars->do_backtrack && (output[d1][d2].en != (float)INF/(float)100.)){
char *mfe_structure = (char *)space(length+1);
for(i=0;i<length;i++) mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(vars->circ) ? backtrack_fc(d1, d2, mfe_structure, vars) : backtrack_f5(length, d1, d2, mfe_structure, vars);
output[d1][d2].s = mfe_structure;
}
}
}
}
return output;
}
/*
 * Sparse-list variant of TwoDfold(): returns a flat, caller-owned array of
 * TwoDfold_solution entries containing only the (k,l) distance classes that
 * actually have a finite energy. A (k=-1,l=-1) entry carries the energy of
 * all states beyond the distance caps (the "_rem" partition) when present,
 * and the list is terminated by an entry with k == l == INF.
 */
PUBLIC TwoDfold_solution *TwoDfoldList(TwoDfold_vars *vars, int distance1, int distance2){
unsigned int i, d1, d2;
unsigned int maxD1;
unsigned int maxD2;
unsigned int length;
unsigned int counter = 0;
int en = 0;
TwoDfold_solution *output;
initialize_TwoDfold_vars(vars);
/* refresh parameters if the global temperature changed since creation */
if(fabs(vars->P->temperature - temperature)>1e-6) update_TwoDfold_params(vars);
vars->S = encode_sequence(vars->sequence, 0);
vars->S1 = encode_sequence(vars->sequence, 1);
make_ptypes(vars);
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
/* clamp user-requested distance caps to the theoretical maxima */
if(distance1 >= 0){
if((unsigned int)distance1 > maxD1)
fprintf(stderr,
"TwoDfoldList@2Dfold.c: limiting maximum basepair distance 1 to %u\n",
maxD1);
else
maxD1 = (unsigned int)distance1;
}
if(distance2 >= 0){
if((unsigned int)distance2 > maxD2)
fprintf(stderr,
"TwoDfoldList@2Dfold.c: limiting maximum basepair distance 2 to %u\n",
maxD2);
else
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
/* upper bound on list size; shrunk to 'counter' entries at the end */
output = (TwoDfold_solution *)space((((vars->maxD1+1)*(vars->maxD2+2))/2 + 2) * sizeof(TwoDfold_solution));
mfe_linear(vars);
if(vars->circ) mfe_circ(vars);
length = vars->seq_length;
for(d1=0; d1<=maxD1;d1++){
if((d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length]))
&& (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))){
/* d1+d2 parity is fixed, hence the d2 += 2 stride */
for(d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]);
d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]);
d2 += 2){
en = ((vars->circ) ? vars->E_Fc[d1][d2/2] : vars->E_F5[length][d1][d2/2]);
if(en == INF) continue;
output[counter].k = d1;
output[counter].l = d2;
/* energies are stored in dekacal/mol; convert to kcal/mol */
output[counter].en = (float)en/(float)100.;
if(vars->do_backtrack){
char *mfe_structure = (char *)space(length+1);
for(i=0;i<length;i++) mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(vars->circ) ? backtrack_fc((int)d1, (int)d2, mfe_structure, vars) : backtrack_f5(length, (int)d1, (int)d2, mfe_structure, vars);
output[counter].s = mfe_structure;
}
else output[counter].s = NULL;
counter++;
}
}
}
/* store entry for remaining partition if it exists */
en = ((vars->circ) ? vars->E_Fc_rem : vars->E_F5_rem[length]);
if(en != INF){
output[counter].k = -1;
output[counter].l = -1;
output[counter].en = (float)en/(float)100.;
if(vars->do_backtrack){
char *mfe_structure = (char *)space(length+1);
for(i=0;i<length;i++) mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(vars->circ) ? backtrack_fc(-1, -1, mfe_structure, vars) : backtrack_f5(length, -1, -1, mfe_structure, vars);
output[counter].s = mfe_structure;
}
else output[counter].s = NULL;
counter++;
}
/* insert end-marker entry */
output[counter].k = output[counter].l = INF;
counter++;
/* resize to actual dataset amount */
output = (TwoDfold_solution*)xrealloc(output, sizeof(TwoDfold_solution) * counter);
return output;
}
/*
 * Backtrack one MFE structure for the 5' fragment [1..j] in distance class
 * (k,l). Returns a freshly allocated dot-bracket string (caller frees), or
 * NULL if j is too short to hold any base pair (j < TURN + 2).
 */
PUBLIC char *TwoDfold_backtrack_f5(unsigned int j, int k, int l, TwoDfold_vars *vars){
unsigned int i;
char *mfe_structure;
/* reject too-short fragments before allocating; the original called
 * space(j+1) first and leaked the buffer on this early return */
if(j < TURN + 2) return NULL;
mfe_structure = (char *)space(j+1);
for(i=0; i < j; i++) mfe_structure[i] = '.';
mfe_structure[i] = '\0';
backtrack_f5(j, k, l, mfe_structure, vars);
return mfe_structure;
}
/* Forward (fill) recursion of TwoDfold for a linear RNA sequence.
 *
 * For every subsequence [i,j] the MFE is partitioned into distance classes
 * (k, l) = (base-pair distance to reference 1, distance to reference 2):
 *   E_C  [ij][k][l/2] -- [i,j] closed by pair (i,j)
 *   E_M  [ij][k][l/2] -- [i,j] is part of a multiloop (>= 1 stem)
 *   E_M1 [ij][k][l/2] -- multiloop part with exactly one stem at the 3' end
 *   E_F5 [j] [k][l/2] -- 5' fragment [1..j]
 *   E_F3 (optional)   -- 3' fragment recursion (see NOTE below)
 * l is always of the same parity as the class minimum, hence stored at l/2.
 * Contributions whose distances exceed (maxD1, maxD2) are folded into the
 * scalar *_rem matrices instead.  No structures are produced here; see the
 * backtrack_* routines. */
PRIVATE void mfe_linear(TwoDfold_vars *vars){
  unsigned int d, i, j, ij, maxD1, maxD2, seq_length, dia, dib, dja, djb, *referenceBPs1, *referenceBPs2, *mm1, *mm2, *bpdist;
  int cnt1, cnt2, cnt3, cnt4, d1, d2, energy, dangles, temp2, type, additional_en, *my_iindx, circ;
  short *S1, *reference_pt1, *reference_pt2;
  char *sequence, *ptype;
  paramT *P;

  /* dereferenciate things we often need */
  P             = vars->P;
  sequence      = vars->sequence;
  seq_length    = vars->seq_length;
  maxD1         = vars->maxD1;
  maxD2         = vars->maxD2;
  S1            = vars->S1;
  ptype         = vars->ptype;
  reference_pt1 = vars->reference_pt1;
  reference_pt2 = vars->reference_pt2;
  my_iindx      = vars->my_iindx;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  dangles       = vars->dangles;
  mm1           = vars->mm1;
  mm2           = vars->mm2;
  bpdist        = vars->bpdist;
  circ          = vars->circ;

  for (d = TURN+2; d <= seq_length; d++) { /* i,j in [1..length] */
#ifdef _OPENMP
    /* all subsequences of the same length d are mutually independent */
#pragma omp parallel for private(additional_en, j, energy, temp2, i, ij, dia,dib,dja,djb,cnt1,cnt2,cnt3,cnt4, d1, d2)
#endif
    for (j = d; j <= seq_length; j++) {
      unsigned int p, q, pq, u, maxp, dij;
      /* NB: this inner 'type' intentionally shadows the outer declaration */
      int type_2, type, tt, no_close, base_d1, base_d2;
      i = j-d+1;
      dij = j - i - 1; /* loop size of a hairpin closed by (i,j) */
      ij = my_iindx[i]-j;
      type = ptype[ij];
      no_close = (((type==3)||(type==4))&&no_closingGU);
      if (type) { /* we have a pair */
        /* increase or decrease distance-to-reference value depending whether (i,j) is included in
         * reference or has to be introduced
         */
        base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
        base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;

        /* HAIRPIN STRUCTURES */
        /* get distance to reference if closing the hairpin
         * d = dbp(T_{i,j}, {i,j})
         */
        d1 = base_d1 + referenceBPs1[ij];
        d2 = base_d2 + referenceBPs2[ij];

        /* allocate E_C[ij] over a conservative (k,l) bounding box; the
         * actually touched range is tracked in *_real and trimmed later */
        int min_k, max_k, min_l, max_l;
        int real_min_k, real_max_k, *min_l_real, *max_l_real;
        min_l = min_k = 0;
        max_k = mm1[ij] + referenceBPs1[ij];
        max_l = mm2[ij] + referenceBPs2[ij];
        prepareBoundaries(min_k,
                          max_k,
                          min_l,
                          max_l,
                          bpdist[ij],
                          &vars->k_min_values[ij],
                          &vars->k_max_values[ij],
                          &vars->l_min_values[ij],
                          &vars->l_max_values[ij]
                          );
        preparePosteriorBoundaries( vars->k_max_values[ij] - vars->k_min_values[ij] + 1,
                                    vars->k_min_values[ij],
                                    &real_min_k,
                                    &real_max_k,
                                    &min_l_real,
                                    &max_l_real
                                    );
        prepareArray( &vars->E_C[ij],
                      vars->k_min_values[ij],
                      vars->k_max_values[ij],
                      vars->l_min_values[ij],
                      vars->l_max_values[ij]
                      );
#ifdef COUNT_STATES
        prepareArray2( &vars->N_C[ij],
                       vars->k_min_values[ij],
                       vars->k_max_values[ij],
                       vars->l_min_values[ij],
                       vars->l_max_values[ij]
                       );
#endif
        /* d1 and d2 are the distancies to both references introduced by closing a hairpin structure at (i,j) */
        if((d1 >= 0) && (d2 >= 0)){
          if(((unsigned int)d1<=maxD1) && ((unsigned int)d2 <= maxD2)){
            vars->E_C[ij][d1][d2/2] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, P);
            updatePosteriorBoundaries(d1,
                                      d2,
                                      &real_min_k,
                                      &real_max_k,
                                      &min_l_real,
                                      &max_l_real
                                      );
#ifdef COUNT_STATES
            vars->N_C[ij][d1][d2/2] = 1;
#endif
          }
          else{
            /* hairpin distance exceeds the class limits -> remainder */
            vars->E_C_rem[ij] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, P);
          }
        }

        /* INTERIOR LOOP STRUCTURES */
        maxp = MIN2(j-2-TURN,i+MAXLOOP+1);
        for(p = i+1; p <= maxp; p++){
          unsigned int minq = p + TURN + 1;
          unsigned int ln_pre = dij + p;
          if(ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1;
          for(q = minq; q < j; q++){
            pq = my_iindx[p]-q;
            /* set distance to reference structure... */
            type_2 = ptype[pq];
            if (type_2==0) continue;
            type_2 = rtype[type_2];
            /* get distance to reference if closing the interior loop
             * d2 = dbp(S_{i,j}, S_{p.q} + {i,j})
             */
            d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[pq];
            if(no_closingGU)
              if(no_close||(type_2==3)||(type_2==4))
                if((p>i+1)||(q<j-1)) continue; /* continue unless stack */
            energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
            if(vars->E_C[pq] != NULL){
              for(cnt1 = vars->k_min_values[pq]; cnt1 <= vars->k_max_values[pq]; cnt1++){
                for(cnt2 = vars->l_min_values[pq][cnt1]; cnt2 <= vars->l_max_values[pq][cnt1]; cnt2+=2){
                  if(vars->E_C[pq][cnt1][cnt2/2] != INF){
                    if(((cnt1 + d1) <= maxD1) && ((cnt2+d2) <= maxD2)){
                      vars->E_C[ij][cnt1 + d1][(cnt2 + d2)/2] = MIN2( vars->E_C[ij][cnt1 + d1][(cnt2 + d2)/2],
                                                                     vars->E_C[pq][cnt1][cnt2/2] + energy
                                                                     );
                      updatePosteriorBoundaries(cnt1 + d1,
                                                cnt2 + d2,
                                                &real_min_k,
                                                &real_max_k,
                                                &min_l_real,
                                                &max_l_real
                                                );
#ifdef COUNT_STATES
                      vars->N_C[ij][cnt1 + d1][(cnt2 + d2)/2] += vars->N_C[pq][cnt1][cnt2/2];
#endif
                    }
                    /* collect all cases where d1+cnt1 or d2+cnt2 exceeds maxD1, maxD2, respectively */
                    else{
                      vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij], vars->E_C[pq][cnt1][cnt2/2] + energy);
                    }
                  }
                }
              }
            }
            /* collect all contributions where C[pq] already lies outside k_max, l_max boundary */
            if(vars->E_C_rem[pq] != INF){
              vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij], vars->E_C_rem[pq] + energy);
            }
          } /* end q-loop */
        } /* end p-loop */

        /* MULTI LOOP STRUCTURES */
        if(!no_close){
          /* dangle energies for multiloop closing stem */
          tt = rtype[type];
          temp2 = P->MLclosing;
          if(dangles == 2)
            temp2 += E_MLstem(tt, S1[j-1], S1[i+1], P);
          else
            temp2 += E_MLstem(tt, -1, -1, P);
          /* decompose interior as M[i+1,u] + M1[u+1,j-1] for all split points u */
          for(u=i+TURN+2; u<j-TURN-2;u++){
            int i1u   = my_iindx[i+1]-u;
            int u1j1  = my_iindx[u+1]-j+1;
            /* check all cases where either M or M1 are already out of scope of maxD1 and/or maxD2 */
            if(vars->E_M_rem[i1u] != INF){
              for(cnt3 = vars->k_min_values_m1[u1j1];
                  cnt3 <= vars->k_max_values_m1[u1j1];
                  cnt3++)
                for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
                    cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
                    cnt4+=2){
                  if(vars->E_M1[u1j1][cnt3][cnt4/2]!= INF){
                    vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
                                             vars->E_M_rem[i1u]
                                             + vars->E_M1[u1j1][cnt3][cnt4/2]
                                             + temp2
                                             );
                  }
                }
              if(vars->E_M1_rem[u1j1] != INF){
                vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
                                         vars->E_M_rem[i1u]
                                         + vars->E_M1_rem[u1j1]
                                         + temp2
                                         );
              }
            }
            if(vars->E_M1_rem[u1j1] != INF){
              for(cnt1 = vars->k_min_values_m[i1u];
                  cnt1 <= vars->k_max_values_m[i1u];
                  cnt1++)
                for(cnt2 = vars->l_min_values_m[i1u][cnt1];
                    cnt2 <= vars->l_max_values_m[i1u][cnt1];
                    cnt2+=2)
                  if(vars->E_M[i1u][cnt1][cnt2/2] != INF){
                    vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
                                             vars->E_M[i1u][cnt1][cnt2/2]
                                             + vars->E_M1_rem[u1j1]
                                             + temp2
                                             );
                  }
            }
            /* get distance to reference if closing the multiloop
             * d = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1})
             */
            if(!vars->E_M[i1u]) continue;
            if(!vars->E_M1[u1j1]) continue;
            d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[i1u] - referenceBPs1[u1j1];
            d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[i1u] - referenceBPs2[u1j1];
            for(cnt1 = vars->k_min_values_m[i1u];
                cnt1 <= vars->k_max_values_m[i1u];
                cnt1++)
              for(cnt2 = vars->l_min_values_m[i1u][cnt1];
                  cnt2 <= vars->l_max_values_m[i1u][cnt1];
                  cnt2+=2)
                for(cnt3 = vars->k_min_values_m1[u1j1];
                    cnt3 <= vars->k_max_values_m1[u1j1];
                    cnt3++)
                  for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
                      cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
                      cnt4+=2){
                    if((vars->E_M[i1u][cnt1][cnt2/2] != INF) && (vars->E_M1[u1j1][cnt3][cnt4/2]!= INF)){
                      if(((cnt1+cnt3+d1) <= maxD1) && ((cnt2+cnt4+d2) <= maxD2)){
                        vars->E_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2] = MIN2( vars->E_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2],
                                                                             vars->E_M[i1u][cnt1][cnt2/2]
                                                                             + vars->E_M1[u1j1][cnt3][cnt4/2]
                                                                             + temp2
                                                                             );
                        updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                                  cnt2 + cnt4 + d2,
                                                  &real_min_k,
                                                  &real_max_k,
                                                  &min_l_real,
                                                  &max_l_real
                                                  );
#ifdef COUNT_STATES
                        vars->N_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2] += vars->N_M[i1u][cnt1][cnt2/2] * vars->N_M1[u1j1][cnt3][cnt4/2];
#endif
                      }
                      /* collect all cases where d1+cnt1+cnt3 or d2+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
                      else{
                        vars->E_C_rem[ij] = MIN2( vars->E_C_rem[ij],
                                                  vars->E_M[i1u][cnt1][cnt2/2]
                                                  + vars->E_M1[u1j1][cnt3][cnt4/2]
                                                  + temp2
                                                  );
                      }
                    }
                  }
          }
        }

        /* resize and move memory portions of energy matrix E_C */
        adjustArrayBoundaries(&vars->E_C[ij],
                              &vars->k_min_values[ij],
                              &vars->k_max_values[ij],
                              &vars->l_min_values[ij],
                              &vars->l_max_values[ij],
                              real_min_k,
                              real_max_k,
                              min_l_real,
                              max_l_real
                              );
#ifdef COUNT_STATES
        /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
      } /* end >> if (pair) << */

      /* done with c[i,j], now compute fML[i,j] */
      /* free ends ? -----------------------------------------*/
      /* distance deltas for leaving i (dia/dib) or j (dja/djb) unpaired */
      dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i+1]-j];
      dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i+1]-j];
      dja = referenceBPs1[ij] - referenceBPs1[ij+1];
      djb = referenceBPs2[ij] - referenceBPs2[ij+1];

      if(dangles==2)
        temp2 = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
      else
        temp2 = E_MLstem(type, -1, -1, P);

      /* allocate E_M[ij] / E_M1[ij] over a conservative bounding box */
      int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
      int min_k_real_m, max_k_real_m, *min_l_real_m, *max_l_real_m;
      int min_k_real_m1, max_k_real_m1, *min_l_real_m1, *max_l_real_m1;
      min_k_guess = min_l_guess = 0;
      max_k_guess = mm1[ij] + referenceBPs1[ij];
      max_l_guess = mm2[ij] + referenceBPs2[ij];
      prepareBoundaries(min_k_guess,
                        max_k_guess,
                        min_l_guess,
                        max_l_guess,
                        bpdist[ij],
                        &vars->k_min_values_m[ij],
                        &vars->k_max_values_m[ij],
                        &vars->l_min_values_m[ij],
                        &vars->l_max_values_m[ij]
                        );
      prepareBoundaries(min_k_guess,
                        max_k_guess,
                        min_l_guess,
                        max_l_guess,
                        bpdist[ij],
                        &vars->k_min_values_m1[ij],
                        &vars->k_max_values_m1[ij],
                        &vars->l_min_values_m1[ij],
                        &vars->l_max_values_m1[ij]
                        );
      preparePosteriorBoundaries( vars->k_max_values_m[ij] - vars->k_min_values_m[ij] + 1,
                                  vars->k_min_values_m[ij],
                                  &min_k_real_m,
                                  &max_k_real_m,
                                  &min_l_real_m,
                                  &max_l_real_m
                                  );
      preparePosteriorBoundaries( vars->k_max_values_m1[ij] - vars->k_min_values_m1[ij] + 1,
                                  vars->k_min_values_m1[ij],
                                  &min_k_real_m1,
                                  &max_k_real_m1,
                                  &min_l_real_m1,
                                  &max_l_real_m1
                                  );
      prepareArray( &vars->E_M[ij],
                    vars->k_min_values_m[ij],
                    vars->k_max_values_m[ij],
                    vars->l_min_values_m[ij],
                    vars->l_max_values_m[ij]
                    );
      prepareArray( &vars->E_M1[ij],
                    vars->k_min_values_m1[ij],
                    vars->k_max_values_m1[ij],
                    vars->l_min_values_m1[ij],
                    vars->l_max_values_m1[ij]
                    );
#ifdef COUNT_STATES
      prepareArray2( &vars->N_M[ij],
                     vars->k_min_values_m[ij],
                     vars->k_max_values_m[ij],
                     vars->l_min_values_m[ij],
                     vars->l_max_values_m[ij]
                     );
      prepareArray2( &vars->N_M1[ij],
                     vars->k_min_values_m1[ij],
                     vars->k_max_values_m1[ij],
                     vars->l_min_values_m1[ij],
                     vars->l_max_values_m1[ij]
                     );
#endif
      /* now to the actual computations... */
      /* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */
      if(vars->E_C_rem[ij] != INF){
        vars->E_M_rem[ij] = vars->E_M1_rem[ij] = temp2 + vars->E_C_rem[ij];
      }
      if(vars->E_C[ij])
        for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++){
          for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2){
            if(vars->E_C[ij][cnt1][cnt2/2] != INF){
              vars->E_M[ij][cnt1][cnt2/2] = vars->E_M1[ij][cnt1][cnt2/2] = temp2 + vars->E_C[ij][cnt1][cnt2/2];
              updatePosteriorBoundaries(cnt1,
                                        cnt2,
                                        &min_k_real_m,
                                        &max_k_real_m,
                                        &min_l_real_m,
                                        &max_l_real_m
                                        );
              updatePosteriorBoundaries(cnt1,
                                        cnt2,
                                        &min_k_real_m1,
                                        &max_k_real_m1,
                                        &min_l_real_m1,
                                        &max_l_real_m1
                                        );
#ifdef COUNT_STATES
              vars->N_M[ij][cnt1][cnt2/2] = vars->N_M1[ij][cnt1][cnt2/2] = vars->N_C[ij][cnt1][cnt2/2];
#endif
            }
          }
        }
      /* 2nd E_M[ij] = MIN(E_M[ij], E_M[i+1,j] + c) */
      if(vars->E_M_rem[my_iindx[i+1]-j] != INF){
        vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                 vars->E_M_rem[my_iindx[i+1]-j] + P->MLbase
                                 );
      }
      if(vars->E_M[my_iindx[i+1]-j])
        for(cnt1 = vars->k_min_values_m[my_iindx[i+1]-j];
            cnt1 <= vars->k_max_values_m[my_iindx[i+1]-j];
            cnt1++){
          for(cnt2 = vars->l_min_values_m[my_iindx[i+1]-j][cnt1];
              cnt2 <= vars->l_max_values_m[my_iindx[i+1]-j][cnt1];
              cnt2+=2){
            if(vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] != INF){
              if(((cnt1 + dia) <= maxD1) && ((cnt2 + dib) <= maxD2)){
                vars->E_M[ij][cnt1+dia][(cnt2+dib)/2] = MIN2( vars->E_M[ij][cnt1+dia][(cnt2+dib)/2],
                                                              vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase
                                                              );
                updatePosteriorBoundaries(cnt1 + dia,
                                          cnt2 + dib,
                                          &min_k_real_m,
                                          &max_k_real_m,
                                          &min_l_real_m,
                                          &max_l_real_m
                                          );
#ifdef COUNT_STATES
                vars->N_M[ij][cnt1+dia][(cnt2+dib)/2] += vars->N_M[my_iindx[i+1]-j][cnt1][cnt2/2];
#endif
              }
              /* collect all cases where dia+cnt1 or dib+cnt2 exceeds maxD1, maxD2, respectively */
              else{
                vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                         vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase
                                         );
              }
            }
          }
        }
      /* 3rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */
      if(vars->E_M_rem[ij+1] != INF){
        vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                 vars->E_M_rem[ij+1] + P->MLbase
                                 );
      }
      if(vars->E_M[ij+1])
        for(cnt1 = vars->k_min_values_m[ij+1];
            cnt1 <= vars->k_max_values_m[ij+1];
            cnt1++){
          for(cnt2 = vars->l_min_values_m[ij+1][cnt1];
              cnt2 <= vars->l_max_values_m[ij+1][cnt1];
              cnt2+=2){
            if(vars->E_M[ij+1][cnt1][cnt2/2] != INF){
              if(((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)){
                vars->E_M[ij][cnt1+dja][(cnt2+djb)/2] = MIN2( vars->E_M[ij][cnt1+dja][(cnt2+djb)/2],
                                                              vars->E_M[ij+1][cnt1][cnt2/2] + P->MLbase
                                                              );
                updatePosteriorBoundaries(cnt1 + dja,
                                          cnt2 + djb,
                                          &min_k_real_m,
                                          &max_k_real_m,
                                          &min_l_real_m,
                                          &max_l_real_m
                                          );
#ifdef COUNT_STATES
                vars->N_M[ij][cnt1+dja][(cnt2+djb)/2] += vars->N_M[ij+1][cnt1][cnt2/2];
#endif
              }
              /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
              else{
                vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                         vars->E_M[ij+1][cnt1][cnt2/2] + P->MLbase
                                         );
              }
            }
          }
        }
      /* 4th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */
      if(vars->E_M1_rem[ij+1] != INF){
        vars->E_M1_rem[ij] = MIN2( vars->E_M1_rem[ij],
                                   vars->E_M1_rem[ij+1] + P->MLbase
                                   );
      }
      if(vars->E_M1[ij+1])
        for(cnt1 = vars->k_min_values_m1[ij+1];
            cnt1 <= vars->k_max_values_m1[ij+1];
            cnt1++){
          for(cnt2 = vars->l_min_values_m1[ij+1][cnt1];
              cnt2 <= vars->l_max_values_m1[ij+1][cnt1];
              cnt2+=2){
            if(vars->E_M1[ij+1][cnt1][cnt2/2] != INF){
              if(((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)){
                vars->E_M1[ij][cnt1+dja][(cnt2+djb)/2] = MIN2( vars->E_M1[ij][cnt1+dja][(cnt2+djb)/2],
                                                               vars->E_M1[ij+1][cnt1][cnt2/2] + P->MLbase
                                                               );
                updatePosteriorBoundaries(cnt1 + dja,
                                          cnt2 + djb,
                                          &min_k_real_m1,
                                          &max_k_real_m1,
                                          &min_l_real_m1,
                                          &max_l_real_m1
                                          );
#ifdef COUNT_STATES
                vars->N_M1[ij][cnt1+dja][(cnt2+djb)/2] += vars->N_M1[ij+1][cnt1][cnt2/2];
#endif
              }
              /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
              else{
                vars->E_M1_rem[ij] = MIN2( vars->E_M1_rem[ij],
                                           vars->E_M1[ij+1][cnt1][cnt2/2] + P->MLbase
                                           );
              }
            }
          }
        }
      /* 5th E_M[ij] = MIN(E_M[ij], min(E_M[i,k] + E_M[k+1,j])) */
      if(j > TURN + 2)
        for (u = i+1+TURN; u <= j-2-TURN; u++){
          /* check all cases where M(i,u) and/or M(u+1,j) are already out of scope of maxD1 and/or maxD2 */
          if(vars->E_M_rem[my_iindx[i]-u] != INF){
            for(cnt3 = vars->k_min_values_m[my_iindx[u+1]-j];
                cnt3 <= vars->k_max_values_m[my_iindx[u+1]-j];
                cnt3++){
              for(cnt4 = vars->l_min_values_m[my_iindx[u+1]-j][cnt3];
                  cnt4 <= vars->l_max_values_m[my_iindx[u+1]-j][cnt3];
                  cnt4+=2){
                if(vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2] != INF){
                  vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                           vars->E_M_rem[my_iindx[i]-u] + vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
                                           );
                }
              }
            }
            if(vars->E_M_rem[my_iindx[u+1]-j] != INF){
              vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                       vars->E_M_rem[my_iindx[i]-u] + vars->E_M_rem[my_iindx[u+1]-j]
                                       );
            }
          }
          if(vars->E_M_rem[my_iindx[u+1]-j] != INF){
            for(cnt1 = vars->k_min_values_m[my_iindx[i]-u];
                cnt1 <= vars->k_max_values_m[my_iindx[i]-u];
                cnt1++){
              for(cnt2 = vars->l_min_values_m[my_iindx[i]-u][cnt1];
                  cnt2 <= vars->l_max_values_m[my_iindx[i]-u][cnt1];
                  cnt2+=2){
                if(vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] != INF){
                  vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                           vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] + vars->E_M_rem[my_iindx[u+1]-j]
                                           );
                }
              }
            }
          }
          if(!vars->E_M[my_iindx[i]-u]) continue;
          if(!vars->E_M[my_iindx[u+1]-j]) continue;
          /* dia/dib reused here as the distance delta of the M|M split */
          dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i]-u] - referenceBPs1[my_iindx[u+1]-j];
          dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i]-u] - referenceBPs2[my_iindx[u+1]-j];
          for(cnt1 = vars->k_min_values_m[my_iindx[i]-u];
              cnt1 <= vars->k_max_values_m[my_iindx[i]-u];
              cnt1++){
            for(cnt2 = vars->l_min_values_m[my_iindx[i]-u][cnt1];
                cnt2 <= vars->l_max_values_m[my_iindx[i]-u][cnt1];
                cnt2+=2){
              for(cnt3 = vars->k_min_values_m[my_iindx[u+1]-j];
                  cnt3 <= vars->k_max_values_m[my_iindx[u+1]-j];
                  cnt3++){
                for(cnt4 = vars->l_min_values_m[my_iindx[u+1]-j][cnt3];
                    cnt4 <= vars->l_max_values_m[my_iindx[u+1]-j][cnt3];
                    cnt4+=2){
                  if((vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] != INF) && (vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2] != INF)){
                    if(((cnt1 + cnt3 + dia) <= maxD1) && ((cnt2 + cnt4 + dib) <= maxD2)){
                      vars->E_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2] = MIN2( vars->E_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2],
                                                                             vars->E_M[my_iindx[i]-u][cnt1][cnt2/2]
                                                                             + vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
                                                                             );
                      updatePosteriorBoundaries(cnt1 + cnt3 + dia,
                                                cnt2 + cnt4 + dib,
                                                &min_k_real_m,
                                                &max_k_real_m,
                                                &min_l_real_m,
                                                &max_l_real_m
                                                );
#ifdef COUNT_STATES
                      /* NOTE(review): the energy terms above use E_M for the
                       * [u+1,j] part but the count multiplies by N_M1 --
                       * possibly should be N_M; verify before relying on
                       * COUNT_STATES output here */
                      vars->N_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2] += vars->N_M[my_iindx[i]-u][cnt1][cnt2/2] * vars->N_M1[my_iindx[u+1]-j][cnt3][cnt4/2];
#endif
                    }
                    /* collect all cases where dia+cnt1+cnt3 or dib+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
                    else{
                      vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
                                               vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] + vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
                                               );
                    }
                  }
                }
              }
            }
          }
        }
      /* thats all folks for the multiloop decomposition... */
      adjustArrayBoundaries(&vars->E_M[ij],
                            &vars->k_min_values_m[ij],
                            &vars->k_max_values_m[ij],
                            &vars->l_min_values_m[ij],
                            &vars->l_max_values_m[ij],
                            min_k_real_m,
                            max_k_real_m,
                            min_l_real_m,
                            max_l_real_m
                            );
      adjustArrayBoundaries(&vars->E_M1[ij],
                            &vars->k_min_values_m1[ij],
                            &vars->k_max_values_m1[ij],
                            &vars->l_min_values_m1[ij],
                            &vars->l_max_values_m1[ij],
                            min_k_real_m1,
                            max_k_real_m1,
                            min_l_real_m1,
                            max_l_real_m1
                            );
#ifdef COUNT_STATES
      /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
    } /* end of j-loop */
  }

  /* calculate energies of 5' and 3' fragments */

  /* prepare first entries in E_F5: fragments shorter than TURN+2 are
   * necessarily unpaired, energy 0, distance class (0,0) */
  for(cnt1 = 1; cnt1 <= TURN+1; cnt1++){
    vars->E_F5[cnt1]                = (int **)space(sizeof(int *));
    vars->E_F5[cnt1][0]             = (int *)space(sizeof(int));
    vars->E_F5[cnt1][0][0]          = 0;
    vars->E_F5_rem[cnt1]            = INF;
    vars->k_min_values_f[cnt1]      = vars->k_max_values_f[cnt1] = 0;
    vars->l_min_values_f[cnt1]      = (int *)space(sizeof(int));
    vars->l_max_values_f[cnt1]      = (int *)space(sizeof(int));
    vars->l_min_values_f[cnt1][0]   = vars->l_max_values_f[cnt1][0] = 0;
#ifdef COUNT_STATES
    vars->N_F5[cnt1]                = (unsigned long **)space(sizeof(unsigned long *));
    vars->N_F5[cnt1][0]             = (unsigned long *)space(sizeof(unsigned long));
    vars->N_F5[cnt1][0][0]          = 1;
#endif
  }
  for (j=TURN+2; j <= seq_length; j++) {
    /* distance delta for leaving j unpaired in the 5' fragment */
    unsigned int da = referenceBPs1[my_iindx[1]-j] - referenceBPs1[my_iindx[1]-j+1];
    unsigned int db = referenceBPs2[my_iindx[1]-j] - referenceBPs2[my_iindx[1]-j+1];
    type=ptype[my_iindx[1]-j];
    additional_en = 0;
    if(type){
      if(dangles == 2)
        additional_en += E_ExtLoop(type, -1, j < seq_length ? S1[j+1] : -1, P);
      else
        additional_en += E_ExtLoop(type, -1, -1, P);
    }
    /* make min and max k guess for memory allocation */
    int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
    int *min_l_real, *max_l_real, min_k_real, max_k_real;
    min_k_guess = min_l_guess = 0;
    max_k_guess = referenceBPs1[my_iindx[1]-j] + mm1[my_iindx[1]-j];
    max_l_guess = referenceBPs2[my_iindx[1]-j] + mm2[my_iindx[1]-j];
    prepareBoundaries(min_k_guess,
                      max_k_guess,
                      min_l_guess,
                      max_l_guess,
                      bpdist[my_iindx[1]-j],
                      &vars->k_min_values_f[j],
                      &vars->k_max_values_f[j],
                      &vars->l_min_values_f[j],
                      &vars->l_max_values_f[j]
                      );
    preparePosteriorBoundaries( vars->k_max_values_f[j] - vars->k_min_values_f[j] + 1,
                                vars->k_min_values_f[j],
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    prepareArray( &vars->E_F5[j],
                  vars->k_min_values_f[j],
                  vars->k_max_values_f[j],
                  vars->l_min_values_f[j],
                  vars->l_max_values_f[j]
                  );
#ifdef COUNT_STATES
    prepareArray2( &vars->N_F5[j],
                   vars->k_min_values_f[j],
                   vars->k_max_values_f[j],
                   vars->l_min_values_f[j],
                   vars->l_max_values_f[j]
                   );
#endif
    /* begin the actual computation of 5' end energies */

    /* j-1 is unpaired ... */
    vars->E_F5_rem[j] = vars->E_F5_rem[j-1];
    for(cnt1 = vars->k_min_values_f[j-1]; cnt1 <= vars->k_max_values_f[j-1]; cnt1++){
      for(cnt2 = vars->l_min_values_f[j-1][cnt1]; cnt2 <= vars->l_max_values_f[j-1][cnt1]; cnt2+=2){
        if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){
          vars->E_F5[j][cnt1+da][(cnt2+db)/2] = MIN2( vars->E_F5[j][cnt1+da][(cnt2+db)/2],
                                                      vars->E_F5[j-1][cnt1][cnt2/2]
                                                      );
          updatePosteriorBoundaries(cnt1 + da,
                                    cnt2 + db,
                                    &min_k_real,
                                    &max_k_real,
                                    &min_l_real,
                                    &max_l_real
                                    );
#ifdef COUNT_STATES
          vars->N_F5[j][cnt1+da][(cnt2+db)/2] += vars->N_F5[j-1][cnt1][cnt2/2];
#endif
        }
        /* collect all cases where da+cnt1 or db+cnt2 exceeds maxD1, maxD2, respectively */
        else{
          vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j], vars->E_F5[j-1][cnt1][cnt2/2]);
        }
      }
    }
    /* j pairs with 1 */
    if(vars->E_C_rem[my_iindx[1]-j] != INF){
      vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j], vars->E_C_rem[my_iindx[1]-j] + additional_en);
    }
    if(vars->E_C[my_iindx[1]-j])
      for(cnt1 = vars->k_min_values[my_iindx[1]-j]; cnt1 <= vars->k_max_values[my_iindx[1]-j]; cnt1++)
        for(cnt2 = vars->l_min_values[my_iindx[1]-j][cnt1]; cnt2 <= vars->l_max_values[my_iindx[1]-j][cnt1]; cnt2+=2){
          if(vars->E_C[my_iindx[1]-j][cnt1][cnt2/2] != INF){
            vars->E_F5[j][cnt1][cnt2/2] = MIN2( vars->E_F5[j][cnt1][cnt2/2],
                                                vars->E_C[my_iindx[1]-j][cnt1][cnt2/2]+ additional_en
                                                );
            updatePosteriorBoundaries(cnt1,
                                      cnt2,
                                      &min_k_real,
                                      &max_k_real,
                                      &min_l_real,
                                      &max_l_real
                                      );
#ifdef COUNT_STATES
            vars->N_F5[j][cnt1][cnt2/2] += vars->N_C[my_iindx[1]-j][cnt1][cnt2/2];
#endif
          }
        }
    /* j pairs with some other nucleotide -> see below */
    for (i=j-TURN-1; i>1; i--) {
      ij = my_iindx[i]-j;
      type = ptype[ij];
      if (type) {
        if(dangles == 2)
          additional_en = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
        else
          additional_en = E_ExtLoop(type, -1, -1, P);
        /* remainder combinations: C out of bounds, or F5 out of bounds */
        if(vars->E_C_rem[ij] != INF){
          for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
            for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
              if(vars->E_F5[i-1][cnt3][cnt4/2] != INF){
                vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
                                         vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C_rem[ij] + additional_en
                                         );
              }
            }
          if(vars->E_F5_rem[i-1] != INF){
            vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
                                     vars->E_F5_rem[i-1] + vars->E_C_rem[ij] + additional_en
                                     );
          }
        }
        if((vars->E_F5_rem[i-1] != INF) && (vars->E_C[ij])){
          for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
            for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
              if(vars->E_C[ij][cnt1][cnt2/2]!= INF){
                vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
                                         vars->E_F5_rem[i-1] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
                                         );
              }
        }
        if(!vars->E_C[ij]) continue;
        /* distance delta of splitting [1..j] into F5[1..i-1] + C[i,j] */
        unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
        unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
        for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
          for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
            for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
              for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
                if(vars->E_F5[i-1][cnt3][cnt4/2] != INF && vars->E_C[ij][cnt1][cnt2/2]!= INF){
                  if(((cnt1 + cnt3 + d1a) <= maxD1) && ((cnt2 + cnt4 + d1b) <= maxD2)){
                    vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] = MIN2( vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2],
                                                                           vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
                                                                           );
                    updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
                                              cnt2 + cnt4 + d1b,
                                              &min_k_real,
                                              &max_k_real,
                                              &min_l_real,
                                              &max_l_real
                                              );
#ifdef COUNT_STATES
                    vars->N_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] += vars->N_F5[i-1][cnt3][cnt4/2] * vars->N_C[ij][cnt1][cnt2/2];
#endif
                  }
                  /* collect all cases where d1a+cnt1+cnt3 or d1b+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
                  else{
                    vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
                                             vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
                                             );
                  }
                }
              }
      }
    }
    /* resize and move memory portions of energy matrix E_F5 */
    adjustArrayBoundaries(&vars->E_F5[j],
                          &vars->k_min_values_f[j],
                          &vars->k_max_values_f[j],
                          &vars->l_min_values_f[j],
                          &vars->l_max_values_f[j],
                          min_k_real,
                          max_k_real,
                          min_l_real,
                          max_l_real
                          );
  } /* end of j-loop */

  if(compute_2Dfold_F3){
    /* prepare first entries in E_F3 */
    for(cnt1 = seq_length; cnt1 >= seq_length-TURN-1; cnt1--){
      vars->E_F3[cnt1]                = (int **)space(sizeof(int *));
      vars->E_F3[cnt1][0]             = (int *) space(sizeof(int));
      vars->E_F3[cnt1][0][0]          = 0;
      vars->k_min_values_f3[cnt1]     = vars->k_max_values_f3[cnt1] = 0;
      vars->l_min_values_f3[cnt1]     = (int *)space(sizeof(int));
      vars->l_max_values_f3[cnt1]     = (int *)space(sizeof(int));
      vars->l_min_values_f3[cnt1][0]  = vars->l_max_values_f3[cnt1][0] = 0;
    }
    /* begin calculations */
    for (j=seq_length-TURN-2; j >= 1; j--){
      unsigned int da = referenceBPs1[my_iindx[j]-seq_length] - referenceBPs1[my_iindx[j+1]-seq_length];
      unsigned int db = referenceBPs2[my_iindx[j]-seq_length] - referenceBPs2[my_iindx[j+1]-seq_length];
      type=ptype[my_iindx[j]-seq_length];
      additional_en = 0;
      if(type){
        if(dangles == 2)
          additional_en += E_ExtLoop(type, j > 1 ? S1[j-1] : -1, -1, P);
        else
          additional_en += E_ExtLoop(type, -1, -1, P);
      }
      /* make min and max k guess for memory allocation */
      int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
      int *min_l_real, *max_l_real, min_k_real, max_k_real;
      min_k_guess = min_l_guess = 0;
      max_k_guess = referenceBPs1[my_iindx[j]-seq_length] + mm1[my_iindx[j]-seq_length];
      max_l_guess = referenceBPs2[my_iindx[j]-seq_length] + mm2[my_iindx[j]-seq_length];
      prepareBoundaries(min_k_guess,
                        max_k_guess,
                        min_l_guess,
                        max_l_guess,
                        bpdist[my_iindx[j]-seq_length],
                        &vars->k_min_values_f3[j],
                        &vars->k_max_values_f3[j],
                        &vars->l_min_values_f3[j],
                        &vars->l_max_values_f3[j]
                        );
      preparePosteriorBoundaries( vars->k_max_values_f3[j] - vars->k_min_values_f3[j] + 1,
                                  vars->k_min_values_f3[j],
                                  &min_k_real,
                                  &max_k_real,
                                  &min_l_real,
                                  &max_l_real
                                  );
      prepareArray( &vars->E_F3[j],
                    vars->k_min_values_f3[j],
                    vars->k_max_values_f3[j],
                    vars->l_min_values_f3[j],
                    vars->l_max_values_f3[j]
                    );
      /* begin the actual computation of 5' end energies */
      /* j is unpaired ... */
      for(cnt1 = vars->k_min_values_f3[j+1]; cnt1 <= vars->k_max_values_f3[j+1]; cnt1++){
        for(cnt2 = vars->l_min_values_f3[j+1][cnt1]; cnt2 <= vars->l_max_values_f3[j+1][cnt1]; cnt2+=2){
          vars->E_F3[j][cnt1+da][(cnt2+db)/2] = MIN2( vars->E_F3[j][cnt1+da][(cnt2+db)/2],
                                                      vars->E_F3[j+1][cnt1][cnt2/2]
                                                      );
          updatePosteriorBoundaries(cnt1 + da,
                                    cnt2 + db,
                                    &min_k_real,
                                    &max_k_real,
                                    &min_l_real,
                                    &max_l_real
                                    );
        }
      }
      /* j pairs with n */
      if(vars->E_C[my_iindx[j]-seq_length])
        for(cnt1 = vars->k_min_values[my_iindx[j]-seq_length]; cnt1 <= vars->k_max_values[my_iindx[j]-seq_length]; cnt1++)
          for(cnt2 = vars->l_min_values[my_iindx[j]-seq_length][cnt1]; cnt2 <= vars->l_max_values[my_iindx[j]-seq_length][cnt1]; cnt2+=2){
            if(vars->E_C[my_iindx[j]-seq_length][cnt1][cnt2/2] != INF){
              vars->E_F3[j][cnt1][cnt2/2] = MIN2( vars->E_F3[j][cnt1][cnt2/2],
                                                  vars->E_C[my_iindx[j]-seq_length][cnt1][cnt2/2]+ additional_en
                                                  );
              updatePosteriorBoundaries(cnt1,
                                        cnt2,
                                        &min_k_real,
                                        &max_k_real,
                                        &min_l_real,
                                        &max_l_real
                                        );
            }
          }
      /* j pairs with some other nucleotide -> see below */
      /* NOTE(review): the loop below updates E_F5 / E_F5[j] and uses the 5'
       * fragment boundary arrays (k_min_values_f, d1a/d1b computed from
       * my_iindx[1]-...) although we are inside the F3 recursion -- this
       * looks like a copy/paste of the F5 section above; verify against a
       * maintained release before relying on E_F3 results. */
      for (i=j-TURN-1; i>1; i--) {
        ij = my_iindx[i]-j;
        if(!vars->E_C[ij]) continue;
        type = ptype[ij];
        if (type) {
          unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
          unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
          if(dangles == 2)
            additional_en = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
          else
            additional_en = E_ExtLoop(type, -1, -1, P);
          for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
            for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
              for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
                for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
                  if(vars->E_F5[i-1][cnt3][cnt4/2] != INF && vars->E_C[ij][cnt1][cnt2/2]!= INF){
                    vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] = MIN2( vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2],
                                                                           vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
                                                                           );
                    updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
                                              cnt2 + cnt4 + d1b,
                                              &min_k_real,
                                              &max_k_real,
                                              &min_l_real,
                                              &max_l_real
                                              );
#ifdef COUNT_STATES
                    vars->N_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] += vars->N_F5[i-1][cnt3][cnt4/2] * vars->N_C[ij][cnt1][cnt2/2];
#endif
                  }
                }
        }
      }
      /* resize and move memory portions of energy matrix E_F5 */
      /* NOTE(review): see note above -- adjusting E_F5[j] here (not E_F3[j])
       * is consistent with the apparent copy/paste; verify */
      adjustArrayBoundaries(&vars->E_F5[j],
                            &vars->k_min_values_f[j],
                            &vars->k_max_values_f[j],
                            &vars->l_min_values_f[j],
                            &vars->l_max_values_f[j],
                            min_k_real,
                            max_k_real,
                            min_l_real,
                            max_l_real
                            );
    } /* end of j-loop */
  }
}
/*---------------------------------------------------------------------------*/
/* Replace the energy parameter set of the given TwoDfold data structure with
 * one scaled to the current model settings, and rebuild the global base-pair
 * matrix.  Safe to call repeatedly; the previous parameter set is released. */
PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars){
  /* free(NULL) is a no-op per the C standard -- the old `if(vars->P)`
   * guard was redundant */
  free(vars->P);
  vars->P = scale_parameters();
  make_pair_matrix();
}
/*---------------------------------------------------------------------------*/
/* Populate vars->ptype with the pair type of every admissible pair (p, q).
 * Each innermost candidate pair (start, start+TURN+span), span in {1,2}, is
 * extended outwards one position at a time; when noLonelyPairs is set, a
 * pair whose inner and outer neighbours both cannot pair is marked 0
 * (it could only form an isolated pair). */
PRIVATE void make_ptypes(TwoDfold_vars *vars) {
  int n, p, q, start, span;

  n = vars->S[0]; /* encoded sequence length is stored at index 0 */
  for (start = 1; start < n - TURN; start++) {
    for (span = 1; span <= 2; span++) {
      /* prev/cur/next track the pair types of the enclosing, current and
       * next-outer positions while sliding outwards */
      int cur, next = 0, prev = 0;
      p = start;
      q = p + TURN + span;
      if (q > n) continue;
      cur = pair[vars->S[p]][vars->S[q]];
      while ((p >= 1) && (q <= n)) {
        if ((p > 1) && (q < n))
          next = pair[vars->S[p-1]][vars->S[q+1]];
        if (noLonelyPairs && (!prev) && (!next))
          cur = 0; /* p.q could only form an isolated pair */
        vars->ptype[vars->my_iindx[p] - q] = (char) cur;
        prev = cur;
        cur  = next;
        p--;
        q++;
      }
    }
  }
}
PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
int *my_iindx, energy, type, dangles, cnt1, cnt2, cnt3, cnt4;
int **l_min_values, **l_max_values,**l_min_values_f, **l_max_values_f;
int *k_min_values, *k_max_values,*k_min_values_f, *k_max_values_f;
int ***E_C, ***E_F5;
int *E_C_rem, *E_F5_rem;
unsigned int i, ij, seq_length, maxD1, maxD2;
short *S1;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype;
paramT *P;
unsigned int da, db;
P = vars->P;
seq_length = vars->seq_length;
S1 = vars->S1;
ptype = vars->ptype;
my_iindx = vars->my_iindx;
referenceBPs1 = vars->referenceBPs1;
referenceBPs2 = vars->referenceBPs2;
dangles = vars->dangles;
E_F5 = vars->E_F5;
l_min_values_f = vars->l_min_values_f;
l_max_values_f = vars->l_max_values_f;
k_min_values_f = vars->k_min_values_f;
k_max_values_f = vars->k_max_values_f;
E_C = vars->E_C;
l_min_values = vars->l_min_values;
l_max_values = vars->l_max_values;
k_min_values = vars->k_min_values;
k_max_values = vars->k_max_values;
E_F5_rem = vars->E_F5_rem;
E_C_rem = vars->E_C_rem;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
da = referenceBPs1[my_iindx[1]-j] - referenceBPs1[my_iindx[1]-j+1];
db = referenceBPs2[my_iindx[1]-j] - referenceBPs2[my_iindx[1]-j+1];
if(j<TURN+2) return;
/* F5[j] == F5[j-1] ? */
if(k == -1){
if(E_F5_rem[j]==INF)
return;
else if(E_F5_rem[j] == E_F5_rem[j-1]){
backtrack_f5(j-1,k,l,structure, vars);
return;
}
else if(E_F5[j-1]){
for(cnt1 = k_min_values_f[j-1];
cnt1 <= k_max_values_f[j-1];
cnt1++){
for(cnt2 = l_min_values_f[j-1][cnt1];
cnt2 <= l_max_values_f[j-1][cnt1];
cnt2+=2){
if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){
if(E_F5_rem[j] == E_F5[j-1][cnt1][cnt2/2]){
backtrack_f5(j-1, cnt1, cnt2, structure, vars);
return;
}
}
}
}
}
}
else if((k >= da) && (l >= db)){
if(E_F5[j-1]){
if((k - da >= k_min_values_f[j-1]) && (k - da <= k_max_values_f[j-1])){
if((l - db >= l_min_values_f[j-1][k-da]) && (l - db <= l_max_values_f[j-1][k-da]))
if(E_F5[j-1][k-da][(l-db)/2] == E_F5[j][k][l/2]){
backtrack_f5(j-1, k-da, l-db, structure, vars);
return;
}
}
}
}
type = ptype[my_iindx[1]-j];
if(type){
if(dangles == 2)
energy = E_ExtLoop(type, -1, j < seq_length ? S1[j+1] : -1, P);
else
energy = E_ExtLoop(type, -1, -1, P);
if(k == -1){
if(E_C_rem[my_iindx[1]-j] + energy == E_F5_rem[j]){
backtrack_c(1, j, -1, -1, structure, vars);
return;
}
}
else if(k >= k_min_values[my_iindx[1]-j] && (k <= k_max_values[my_iindx[1]-j])){
if((l >= l_min_values[my_iindx[1]-j][k]) && (l <= l_max_values[my_iindx[1]-j][k]))
if(E_C[my_iindx[1]-j][k][l/2] + energy == E_F5[j][k][l/2]){
backtrack_c(1, j, k, l, structure, vars);
return;
}
}
}
for (i=j-TURN-1; i>1; i--) {
ij = my_iindx[i]-j;
type = ptype[ij];
if (type) {
unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
if(dangles == 2)
energy = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
else
energy = E_ExtLoop(type, -1, -1, P);
if(k == -1){
if(E_C_rem[ij] != INF){
for(cnt1 = k_min_values_f[i-1];
cnt1 <= k_max_values_f[i-1];
cnt1++){
for(cnt2 = l_min_values_f[i-1][cnt1];
cnt2 <= l_max_values_f[i-1][cnt1];
cnt2+=2){
if(E_F5_rem[j] == (E_F5[i-1][cnt1][cnt2/2] + E_C_rem[ij] + energy)){
backtrack_f5(i-1, cnt1, cnt2, structure, vars);
backtrack_c(i,j,-1,-1,structure, vars);
return;
}
}
}
if(E_F5_rem[j] == (E_F5_rem[i-1] + E_C_rem[ij] + energy)){
backtrack_f5(i-1, -1, -1, structure, vars);
backtrack_c(i,j,-1,-1,structure,vars);
return;
}
}
if(E_F5_rem[i-1] != INF){
for(cnt1 = k_min_values[ij];
cnt1 <= k_max_values[ij];
cnt1++){
for(cnt2 = l_min_values[ij][cnt1];
cnt2 <= l_max_values[ij][cnt1];
cnt2 += 2){
if(E_F5_rem[j] == (E_F5_rem[i-1] + E_C[ij][cnt1][cnt2/2] + energy)){
backtrack_f5(i-1,-1,-1,structure,vars);
backtrack_c(i,j,cnt1,cnt2,structure,vars);
return;
}
}
}
}
for(cnt1 = k_min_values_f[i-1];
cnt1 <= k_max_values_f[i-1];
cnt1++)
for(cnt2 = l_min_values_f[i-1][cnt1];
cnt2 <= l_max_values_f[i-1][cnt1];
cnt2 += 2)
for(cnt3 = k_min_values[ij];
cnt3 <= k_max_values[ij];
cnt3++)
for(cnt4 = l_min_values[ij][cnt3];
cnt4 <= l_max_values[ij][cnt3];
cnt4 += 2){
if(((cnt1 + cnt3 + d1a)>maxD1) || ((cnt2+cnt4+d1b)>maxD2)){
if(E_F5_rem[j] == (E_F5[i-1][cnt1][cnt2/2] + E_C[ij][cnt3][cnt4/2] + energy)){
backtrack_f5(i-1,cnt1,cnt2,structure,vars);
backtrack_c(i,j,cnt3,cnt4,structure,vars);
return;
}
}
}
}
else if((k >= d1a) && (l >= d1b)){
int k_f_max = MIN2(k-d1a, k_max_values_f[i-1]);
for(cnt1 = k_min_values_f[i-1]; cnt1 <= k_f_max; cnt1++){
int l_f_max = MIN2(l - d1b, l_max_values_f[i-1][cnt1]);
for(cnt2 = l_min_values_f[i-1][cnt1]; cnt2 <= l_f_max; cnt2+=2){
int k_c = k - d1a - cnt1;
if((k_c >= k_min_values[ij]) && (k_c <= k_max_values[ij])){
int l_c = l - d1b - cnt2;
if((l_c >= l_min_values[ij][k_c]) && (l_c <= l_max_values[ij][k_c])){
if(E_F5[j][k][l/2] == (E_F5[i-1][cnt1][cnt2/2] + E_C[ij][k_c][l_c/2] + energy)){
backtrack_f5(i-1, cnt1, cnt2, structure, vars);
backtrack_c(i, j, k_c, l_c, structure, vars);
return;
}
}
}
}
}
}
}
}
nrerror("backtracking failed in f5");
}
/*
 * backtrack_c: recover the substructure enclosed by base pair (i,j).
 *
 * The pair (i,j) is assumed to be part of the structure, with its enclosed
 * part belonging to base-pair distance class (k,l) relative to the two
 * reference structures; k == -1 denotes the "remainder" class whose distance
 * to at least one reference exceeds maxD1/maxD2 (energy then comes from
 * E_C_rem instead of E_C).  The routine re-tests each possible decomposition
 * (hairpin loop, interior loop, multiloop) against the stored energy `e` and
 * recurses into the first one that reproduces it, writing '(' / ')' into
 * `structure` (1-based sequence positions map to structure[i-1]).
 * If no decomposition matches, nrerror() aborts.
 */
PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
unsigned int p, q, pq, ij, maxp, maxD1, maxD2;
int *my_iindx, type, type_2, energy, no_close, dangles, base_d1, base_d2, d1, d2, cnt1, cnt2, cnt3, cnt4;
int **l_min_values, **l_max_values,**l_min_values_m, **l_max_values_m,**l_min_values_m1, **l_max_values_m1;
int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1;
int ***E_C, ***E_M, ***E_M1, *E_C_rem, *E_M_rem, *E_M1_rem;
short *S1;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype, *sequence;
paramT *P;
/* unpack frequently used fields of the DP state into locals */
P = vars->P;
sequence = vars->sequence;
S1 = vars->S1;
ptype = vars->ptype;
my_iindx = vars->my_iindx;
referenceBPs1 = vars->referenceBPs1;
referenceBPs2 = vars->referenceBPs2;
dangles = vars->dangles;
E_C = vars->E_C;
l_min_values = vars->l_min_values;
l_max_values = vars->l_max_values;
k_min_values = vars->k_min_values;
k_max_values = vars->k_max_values;
E_M = vars->E_M;
l_min_values_m = vars->l_min_values_m;
l_max_values_m = vars->l_max_values_m;
k_min_values_m = vars->k_min_values_m;
k_max_values_m = vars->k_max_values_m;
E_M1 = vars->E_M1;
l_min_values_m1 = vars->l_min_values_m1;
l_max_values_m1 = vars->l_max_values_m1;
k_min_values_m1 = vars->k_min_values_m1;
k_max_values_m1 = vars->k_max_values_m1;
E_C_rem = vars->E_C_rem;
E_M_rem = vars->E_M_rem;
E_M1_rem = vars->E_M1_rem;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
ij = my_iindx[i]-j;
/* e = the energy we must reproduce by some decomposition of (i,j) */
int e = (k==-1) ? E_C_rem[ij] : E_C[ij][k][l/2];
type = ptype[ij];
no_close = (((type==3)||(type==4))&&no_closingGU);
/* record the pair (i,j) in the output structure (1-based -> 0-based) */
structure[i-1] = '(';
structure[j-1] = ')';
/* distance contribution of forming pair (i,j) itself: +1 if (i,j) is not in
 * the reference, -1 if it is (forming it then removes one difference) */
base_d1 = ((unsigned int)vars->reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)vars->reference_pt2[i] != j) ? 1 : -1;
base_d1 += referenceBPs1[ij];
base_d2 += referenceBPs2[ij];
/* case 1: (i,j) closes a hairpin loop */
if(k == -1){
if(((unsigned int)base_d1 > maxD1) || ((unsigned int)base_d2 > maxD2)){
if(e == E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], sequence+i-1, P)) return;
}
}
else{
if((unsigned int)base_d1 == k)
if((unsigned int)base_d2 == l)
if(E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], sequence+i-1, P) == e) return;
}
/* case 2: (i,j) closes an interior loop with inner pair (p,q) */
maxp = MIN2(j-2-TURN,i+MAXLOOP+1);
for(p = i+1; p <= maxp; p++){
unsigned int minq, ln_pre;
minq = p + TURN + 1;
ln_pre = j - i - 1;
if(ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1;
for (q = minq; q < j; q++) {
pq = my_iindx[p]-q;
type_2 = ptype[pq];
if (type_2==0) continue;
type_2 = rtype[type_2];
/* d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */
d1 = base_d1 - referenceBPs1[pq];
d2 = base_d2 - referenceBPs2[pq];
energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
if(k == -1){
/* remainder class: inner part may itself be remainder, or a regular
 * class whose combined distance exceeds maxD1/maxD2 */
if(E_C_rem[pq] != INF)
if(e == (E_C_rem[pq] + energy)){
backtrack_c(p,q,-1,-1,structure,vars);
return;
}
if(E_C[pq])
for(cnt1 = k_min_values[pq];
cnt1 <= k_max_values[pq];
cnt1++)
for(cnt2 = l_min_values[pq][cnt1];
cnt2 <= l_max_values[pq][cnt1];
cnt2 += 2){
if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
if(e == (E_C[pq][cnt1][cnt2/2] + energy)){
backtrack_c(p,q,cnt1,cnt2,structure,vars);
return;
}
}
}
}
else{
if(!E_C[pq]) continue;
if(d1 <= k && d2 <= l){
if((k-d1 >= k_min_values[pq]) && (k-d1) <= k_max_values[pq])
if((l - d2 >= l_min_values[pq][k-d1]) && (l-d2 <= l_max_values[pq][k-d1]))
if(E_C[pq][k-d1][(l-d2)/2] + energy == e){
backtrack_c(p, q, k-d1, l-d2, structure, vars);
return;
}
}
}
} /* end q-loop */
} /* end p-loop */
/* multi-loop decomposition ------------------------*/
/* case 3: (i,j) closes a multiloop, split at u into M(i+1,u) + M1(u+1,j-1) */
if(!no_close){
unsigned int u;
int tt;
if(k==-1){
for(u=i+TURN+2; u<j-TURN-2;u++){
int i1u, u1j1;
i1u = my_iindx[i+1]-u;
u1j1 = my_iindx[u+1]-j+1;
tt = rtype[type];
energy = P->MLclosing;
if(dangles == 2)
energy += E_MLstem(tt, S1[j-1], S1[i+1], P);
else
energy += E_MLstem(tt, -1, -1, P);
/* remainder M combined with regular or remainder M1 */
if(E_M_rem[i1u] != INF){
if(E_M1[u1j1])
for(cnt1 = k_min_values_m1[u1j1];
cnt1 <= k_max_values_m1[u1j1];
cnt1++)
for(cnt2 = l_min_values_m1[u1j1][cnt1];
cnt2 <= l_max_values_m1[u1j1][cnt1];
cnt2 += 2){
if(e == (E_M_rem[i1u] + E_M1[u1j1][cnt1][cnt2/2] + energy)){
backtrack_m(i+1,u,-1,-1,structure,vars);
backtrack_m1(u+1,j-1,cnt1,cnt2,structure,vars);
return;
}
}
if(E_M1_rem[u1j1] != INF){
if(e == (E_M_rem[i1u] + E_M1_rem[u1j1] + energy)){
backtrack_m(i+1, u, -1, -1, structure, vars);
backtrack_m1(u+1, j-1, -1, -1, structure, vars);
return;
}
}
}
/* regular M combined with remainder M1 */
if(E_M1_rem[u1j1] != INF){
if(E_M[i1u])
for(cnt1 = k_min_values_m[i1u];
cnt1 <= k_max_values_m[i1u];
cnt1++)
for(cnt2 = l_min_values_m[i1u][cnt1];
cnt2 <= l_max_values_m[i1u][cnt1];
cnt2 += 2)
if(e == (E_M[i1u][cnt1][cnt2/2] + E_M1_rem[u1j1] + energy)){
backtrack_m(i+1,u,cnt1,cnt2,structure,vars);
backtrack_m1(u+1,j-1,-1,-1,structure,vars);
return;
}
}
/* now all cases where we exceed the maxD1/D2 scope by combination of E_M and E_M1 */
if(!E_M[i1u]) continue;
if(!E_M1[u1j1]) continue;
/* get distance to reference if closing this multiloop
* dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
*/
d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
for(cnt1 = vars->k_min_values_m[i1u];
cnt1 <= vars->k_max_values_m[i1u];
cnt1++)
for(cnt2 = vars->l_min_values_m[i1u][cnt1];
cnt2 <= vars->l_max_values_m[i1u][cnt1];
cnt2+=2)
for(cnt3 = vars->k_min_values_m1[u1j1];
cnt3 <= vars->k_max_values_m1[u1j1];
cnt3++)
for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
cnt4+=2){
if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
if(e == (E_M[i1u][cnt1][cnt2/2] + E_M1[u1j1][cnt3][cnt4/2] + energy)){
backtrack_m(i+1,u,cnt1,cnt2,structure,vars);
backtrack_m1(u+1,j-1,cnt3,cnt4,structure,vars);
return;
}
}
}
}
}
else{
/* regular class (k,l): split the remaining distance budget between the
 * M part (cnt1,cnt2) and the M1 part (k-d1-cnt1, l-d2-cnt2) */
for(u=i+TURN+2; u<j-TURN-2;u++){
int i1u, u1j1;
i1u = my_iindx[i+1]-u;
u1j1 = my_iindx[u+1]-j+1;
if(!E_M[i1u]) continue;
if(!E_M1[u1j1]) continue;
/* get distance to reference if closing this multiloop
* dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
*/
d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
tt = rtype[type];
energy = P->MLclosing;
if(dangles == 2)
energy += E_MLstem(tt, S1[j-1], S1[i+1], P);
else
energy += E_MLstem(tt, -1, -1, P);
if((d1 <= k) && (d2 <= l))
for(cnt1 = k_min_values_m[i1u];
cnt1 <= MIN2(k-d1, k_max_values_m[i1u]);
cnt1++)
for(cnt2 = l_min_values_m[i1u][cnt1];
cnt2 <= MIN2(l-d2, l_max_values_m[i1u][cnt1]);
cnt2+=2)
if( ((k-d1-cnt1) >= k_min_values_m1[u1j1])
&& ((k-d1-cnt1) <= k_max_values_m1[u1j1]))
if( ((l-d2-cnt2) >= l_min_values_m1[u1j1][k-d1-cnt1])
&& ((l-d2-cnt2) <= l_max_values_m1[u1j1][k-d1-cnt1]))
if(e == (energy + E_M[i1u][cnt1][cnt2/2] + E_M1[u1j1][k-d1-cnt1][(l-d2-cnt2)/2])){
backtrack_m(i+1, u, cnt1, cnt2, structure, vars);
backtrack_m1(u+1, j-1, k-d1-cnt1, l-d2-cnt2, structure, vars);
return;
}
}
}
}
nrerror("backtracking failed in c");
}
/*
 * backtrack_m: recover a multiloop part M(i,j) whose energy lies in base-pair
 * distance class (k,l) (k == -1 denotes the "remainder" class exceeding
 * maxD1/maxD2, stored in E_M_rem).  Tries, in order: leaving i unpaired
 * (M(i+1,j) + MLbase), leaving j unpaired (M(i,j-1) + MLbase), a single stem
 * C(i,j) + MLstem, and the modular split M(i,u) + C(u+1,j); recurses into the
 * first alternative that reproduces the stored energy `e`.
 * Aborts via nrerror() if nothing matches.
 */
PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
unsigned int u, ij, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
int *my_iindx, type, energy, dangles,circ, cnt1, cnt2, cnt3, cnt4;
int **l_min_values, **l_max_values,**l_min_values_m, **l_max_values_m;
int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m;
int ***E_C, ***E_M, *E_C_rem, *E_M_rem;
short *S1;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype;
paramT *P;
/* unpack frequently used fields of the DP state into locals */
P = vars->P;
seq_length = vars->seq_length;
S1 = vars->S1;
circ = vars->circ;
ptype = vars->ptype;
my_iindx = vars->my_iindx;
referenceBPs1 = vars->referenceBPs1;
referenceBPs2 = vars->referenceBPs2;
dangles = vars->dangles;
E_C = vars->E_C;
l_min_values = vars->l_min_values;
l_max_values = vars->l_max_values;
k_min_values = vars->k_min_values;
k_max_values = vars->k_max_values;
E_M = vars->E_M;
l_min_values_m = vars->l_min_values_m;
l_max_values_m = vars->l_max_values_m;
k_min_values_m = vars->k_min_values_m;
k_max_values_m = vars->k_max_values_m;
E_C_rem = vars->E_C_rem;
E_M_rem = vars->E_M_rem;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
ij = my_iindx[i]-j;
/* e = the energy we must reproduce by some decomposition of M(i,j) */
int e = (k == -1) ? E_M_rem[ij] : E_M[ij][k][l/2];
base_d1 = referenceBPs1[ij];
base_d2 = referenceBPs2[ij];
if(k == -1){
/* new_fML = ML(i+1,j)+c */
d1 = base_d1 - referenceBPs1[my_iindx[i+1]-j];
d2 = base_d2 - referenceBPs2[my_iindx[i+1]-j];
if(E_M_rem[my_iindx[i+1]-j] != INF){
if(e == (E_M_rem[my_iindx[i+1]-j] + P->MLbase)){
backtrack_m(i+1,j,-1,-1,structure,vars);
return;
}
}
if(E_M[my_iindx[i+1]-j])
for(cnt1 = k_min_values_m[my_iindx[i+1]-j];
cnt1 <= k_max_values_m[my_iindx[i+1]-j];
cnt1++)
for(cnt2 = l_min_values_m[my_iindx[i+1]-j][cnt1];
cnt2 <= l_max_values_m[my_iindx[i+1]-j][cnt1];
cnt2 += 2)
if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
if(e == (E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase)){
backtrack_m(i+1,j,cnt1,cnt2,structure,vars);
return;
}
}
/* new_fML = min(ML(i,j-1) + c, new_fML) */
d1 = base_d1 - referenceBPs1[ij+1];
d2 = base_d2 - referenceBPs2[ij+1];
if(E_M_rem[ij+1] != INF){
if(e == (E_M_rem[ij+1] + P->MLbase)){
backtrack_m(i,j-1,-1,-1,structure,vars);
return;
}
}
if(E_M[ij+1])
for(cnt1 = k_min_values_m[ij+1];
cnt1 <= k_max_values_m[ij+1];
cnt1++)
for(cnt2 = l_min_values_m[ij+1][cnt1];
cnt2 <= l_max_values_m[ij+1][cnt1];
cnt2 += 2)
if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
if(e == (E_M[ij+1][cnt1][cnt2/2] + P->MLbase)){
backtrack_m(i,j-1,cnt1,cnt2,structure,vars);
return;
}
}
/* new_fML = min(new_fML, C(i,j)+b) */
if(E_C_rem[ij] != INF){
type = ptype[ij];
if(dangles == 2)
energy = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
else
energy = E_MLstem(type, -1, -1, P);
if(e == (E_C_rem[ij] + energy)){
backtrack_c(i,j,-1,-1,structure,vars);
return;
}
}
/* modular decomposition -------------------------------*/
/* split at u into M(i,u) + C(u+1,j); one side may be a remainder class */
for(u = i+1+TURN; u <= j-2-TURN; u++){
int iu, uj;
iu = my_iindx[i]-u;
uj = my_iindx[u+1]-j;
type = ptype[uj];
d1 = base_d1 - referenceBPs1[iu] - referenceBPs1[uj];
d2 = base_d2 - referenceBPs2[iu] - referenceBPs2[uj];
if(dangles == 2)
energy = E_MLstem(type, S1[u], (j < seq_length) || circ ? S1[j+1] : -1, P);
else
energy = E_MLstem(type, -1, -1, P);
if(E_M_rem[iu] != INF){
if(E_C[uj])
for(cnt1 = k_min_values[uj];
cnt1 <= k_max_values[uj];
cnt1++)
for(cnt2 = l_min_values[uj][cnt1];
cnt2 <= l_max_values[uj][cnt1];
cnt2 += 2)
if(e == (E_M_rem[iu] + E_C[uj][cnt1][cnt2/2] + energy)){
backtrack_m(i,u,-1,-1,structure,vars);
backtrack_c(u+1,j,cnt1,cnt2,structure, vars);
return;
}
if(E_C_rem[uj] != INF){
if(e == (E_M_rem[iu] + E_C_rem[uj] + energy)){
backtrack_m(i,u,-1,-1,structure,vars);
backtrack_c(u+1,j,-1,-1,structure,vars);
return;
}
}
}
if(E_C_rem[uj] != INF){
if(E_M[iu])
for(cnt1 = k_min_values_m[iu];
cnt1 <= k_max_values_m[iu];
cnt1++)
for(cnt2 = l_min_values_m[iu][cnt1];
cnt2 <= l_max_values_m[iu][cnt1];
cnt2 += 2)
if(e == (E_M[iu][cnt1][cnt2/2] + E_C_rem[uj] + energy)){
backtrack_m(i,u,cnt1,cnt2,structure,vars);
backtrack_c(u+1,j,-1,-1,structure,vars);
return;
}
}
/* both sides regular but the combined distance exceeds maxD1/maxD2 */
if(!E_M[iu]) continue;
if(!E_C[uj]) continue;
for(cnt1 = k_min_values_m[iu];
cnt1 <= k_max_values_m[iu];
cnt1++)
for(cnt2 = l_min_values_m[iu][cnt1];
cnt2 <= l_max_values_m[iu][cnt1];
cnt2 += 2)
for(cnt3 = k_min_values[uj];
cnt3 <= k_max_values[uj];
cnt3++){
for(cnt4 = l_min_values[uj][cnt3];
cnt4 <= l_max_values[uj][cnt3];
cnt4 += 2)
if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2))
if(e == (E_M[iu][cnt1][cnt2/2] + E_C[uj][cnt3][cnt4/2] + energy)){
backtrack_m(i, u, cnt1, cnt2, structure, vars);
backtrack_c(u+1, j, cnt3, cnt4, structure, vars);
return;
}
}
}
} /* end if (k == -1) */
else{
/* regular class (k,l): same four decompositions, with exact distance
 * bookkeeping (child class = (k-d1, l-d2)) instead of overflow checks */
d1 = base_d1 - referenceBPs1[my_iindx[i+1]-j];
d2 = base_d2 - referenceBPs2[my_iindx[i+1]-j];
/* new_fML = ML(i+1,j)+c */
if(d1 <= k && d2 <= l)
if((k-d1 >= k_min_values_m[my_iindx[i+1]-j]) && (k-d1 <= k_max_values_m[my_iindx[i+1]-j]))
if((l-d2 >= l_min_values_m[my_iindx[i+1]-j][k-d1]) && (l-d2 <= l_max_values_m[my_iindx[i+1]-j][k-d1])){
if(E_M[my_iindx[i+1]-j][k-d1][(l-d2)/2] + P->MLbase == e){
backtrack_m(i+1, j, k-d1, l-d2, structure, vars);
return;
}
}
d1 = base_d1 - referenceBPs1[ij+1];
d2 = base_d2 - referenceBPs2[ij+1];
/* new_fML = min(ML(i,j-1) + c, new_fML) */
if(E_M[ij+1])
if(d1 <= k && d2 <= l)
if((k-d1 >= k_min_values_m[ij+1]) && (k-d1 <= k_max_values_m[ij+1]))
if((l-d2 >= l_min_values_m[ij+1][k-d1]) && (l-d2 <= l_max_values_m[ij+1][k-d1]))
if(E_M[ij+1][k-d1][(l-d2)/2] + P->MLbase == e){
backtrack_m(i, j-1, k-d1, l-d2, structure, vars);
return;
}
/* new_fML = min(new_fML, C(i,j)+b) */
if(E_C[ij]){
type = ptype[ij];
if(dangles == 2)
energy = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
else
energy = E_MLstem(type, -1, -1, P);
if((k >= k_min_values[ij]) && (k <= k_max_values[ij]))
if((l >= l_min_values[ij][k]) && (l <= l_max_values[ij][k])){
if(E_C[ij][k][l/2] + energy == e){
backtrack_c(i, j, k, l, structure, vars);
return;
}
}
}
/* modular decomposition -------------------------------*/
for(u = i+1+TURN; u <= j-2-TURN; u++){
if(!E_M[my_iindx[i]-u]) continue;
if(!E_C[my_iindx[u+1]-j]) continue;
type = ptype[my_iindx[u+1]-j];
d1 = base_d1 - referenceBPs1[my_iindx[i]-u] - referenceBPs1[my_iindx[u+1]-j];
d2 = base_d2 - referenceBPs2[my_iindx[i]-u] - referenceBPs2[my_iindx[u+1]-j];
if(dangles == 2)
energy = E_MLstem(type, S1[u], ((j < seq_length) || circ) ? S1[j+1] : -1, P);
else
energy = E_MLstem(type, -1, -1, P);
if(d1 <= k && d2 <= l)
for(cnt1 = k_min_values_m[my_iindx[i]-u]; cnt1 <= MIN2(k-d1, k_max_values_m[my_iindx[i]-u]); cnt1++)
for(cnt2 = l_min_values_m[my_iindx[i]-u][cnt1]; cnt2 <= MIN2(l-d2, l_max_values_m[my_iindx[i]-u][cnt1]); cnt2+=2)
if((k-d1-cnt1 >= k_min_values[my_iindx[u+1]-j]) && (k-d1-cnt1 <= k_max_values[my_iindx[u+1]-j]))
if((l-d2-cnt2 >= l_min_values[my_iindx[u+1]-j][k-d1-cnt1]) && (l-d2-cnt2 <= l_max_values[my_iindx[u+1]-j][k-d1-cnt1]))
if(E_M[my_iindx[i]-u][cnt1][cnt2/2] + E_C[my_iindx[u+1]-j][k-d1-cnt1][(l-d2-cnt2)/2] + energy == e){
backtrack_m(i, u, cnt1, cnt2, structure, vars);
backtrack_c(u+1, j, k-d1-cnt1, l-d2-cnt2, structure, vars);
return;
}
}
}
nrerror("backtracking failed in fML\n");
}
/*
 * backtrack_m1: recover a multiloop component M1(i,j) -- a stretch that ends
 * in exactly one stem -- for base-pair distance class (k,l); k == -1 denotes
 * the "remainder" class exceeding maxD1/maxD2 (energy taken from E_M1_rem).
 *
 * Two decompositions are tested against the stored energy e_m1:
 *   (a) M1(i,j) is the single stem C(i,j) plus an MLstem contribution, or
 *   (b) j is unpaired: M1(i,j-1) + MLbase.
 * The first match is followed recursively; if nothing matches, nrerror()
 * aborts.
 *
 * Fixes vs. previous revision:
 *   - guard E_M1[ij+1] against NULL before dereferencing it (and its
 *     k/l range tables) in both the k == -1 and the regular-class branch,
 *     matching the guards used by backtrack_m()/backtrack_c();
 *   - corrected the misspelled diagnostic ("backtack" -> "backtrack").
 */
PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int  ij, seq_length, d1, d2, *referenceBPs1, *referenceBPs2, maxD1, maxD2;
  int           *my_iindx, **l_min_values, **l_max_values, **l_min_values_m1, **l_max_values_m1;
  int           *k_min_values, *k_max_values, *k_min_values_m1, *k_max_values_m1, cnt1, cnt2;
  int           ***E_C, ***E_M1, *E_C_rem, *E_M1_rem, type, dangles, circ, energy, e_m1;
  short         *S1;
  char          *ptype;
  paramT        *P;

  /* unpack frequently used fields of the DP state into locals */
  P                 = vars->P;
  seq_length        = vars->seq_length;
  S1                = vars->S1;
  ptype             = vars->ptype;
  circ              = vars->circ;
  my_iindx          = vars->my_iindx;
  referenceBPs1     = vars->referenceBPs1;
  referenceBPs2     = vars->referenceBPs2;
  dangles           = vars->dangles;
  E_C               = vars->E_C;
  l_min_values      = vars->l_min_values;
  l_max_values      = vars->l_max_values;
  k_min_values      = vars->k_min_values;
  k_max_values      = vars->k_max_values;
  E_M1              = vars->E_M1;
  l_min_values_m1   = vars->l_min_values_m1;
  l_max_values_m1   = vars->l_max_values_m1;
  k_min_values_m1   = vars->k_min_values_m1;
  k_max_values_m1   = vars->k_max_values_m1;
  E_C_rem           = vars->E_C_rem;
  E_M1_rem          = vars->E_M1_rem;
  maxD1             = vars->maxD1;
  maxD2             = vars->maxD2;

  ij    = my_iindx[i] - j;
  /* e_m1 = the energy we must reproduce by some decomposition of M1(i,j) */
  e_m1  = (k == -1) ? E_M1_rem[ij] : E_M1[ij][k][l/2];
  type  = ptype[ij];
  /* distance change caused by removing the unpaired position j */
  d1    = referenceBPs1[ij] - referenceBPs1[ij+1];
  d2    = referenceBPs2[ij] - referenceBPs2[ij+1];

  if(dangles == 2)
    energy = E_MLstem(type, (i > 1) || circ ? S1[i-1] : -1, (j < seq_length) || circ ? S1[j+1] : -1, P);
  else
    energy = E_MLstem(type, -1, -1, P);

  if(k == -1){
    /* (a) single stem, remainder class */
    if(E_C_rem[ij] != INF){
      if(e_m1 == (E_C_rem[ij] + energy)){
        backtrack_c(i, j, -1, -1, structure, vars);
        return;
      }
    }
    /* (b) j unpaired, remainder class */
    if(E_M1_rem[ij+1] != INF){
      if(e_m1 == (E_M1_rem[ij+1] + P->MLbase)){
        backtrack_m1(i, j-1, -1, -1, structure, vars);
        return;
      }
    }
    /* (b') j unpaired, regular class that overflows the distance bounds;
     * guard against a NULL matrix slice before touching its range tables */
    if(E_M1[ij+1])
      for(cnt1 = k_min_values_m1[ij+1];
          cnt1 <= k_max_values_m1[ij+1];
          cnt1++)
        for(cnt2 = l_min_values_m1[ij+1][cnt1];
            cnt2 <= l_max_values_m1[ij+1][cnt1];
            cnt2 += 2)
          if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
            if(e_m1 == (E_M1[ij+1][cnt1][cnt2/2] + P->MLbase)){
              backtrack_m1(i, j-1, cnt1, cnt2, structure, vars);
              return;
            }
          }
  }
  else{
    /* (a) single stem in exactly class (k,l) */
    if(E_C[ij])
      if((k >= k_min_values[ij]) && (k <= k_max_values[ij]))
        if((l >= l_min_values[ij][k]) && (l <= l_max_values[ij][k]))
          if(E_C[ij][k][l/2] + energy == e_m1){
            backtrack_c(i, j, k, l, structure, vars);
            return;
          }
    /* (b) j unpaired, child class (k-d1, l-d2); NULL guard as above */
    if(E_M1[ij+1])
      if(d1 <= k && d2 <= l)
        if((k-d1 >= k_min_values_m1[ij+1]) && (k-d1 <= k_max_values_m1[ij+1]))
          if((l-d2 >= l_min_values_m1[ij+1][k-d1]) && (l-d2 <= l_max_values_m1[ij+1][k-d1]))
            if(E_M1[ij+1][k-d1][(l-d2)/2] + P->MLbase == e_m1){
              backtrack_m1(i, j-1, k-d1, l-d2, structure, vars);
              return;
            }
  }
  nrerror("backtrack failed in m1\n");
}
/*
 * backtrack_fc: backtrack the exterior part of a circular sequence for
 * base-pair distance class (k,l); k == -1 denotes the "remainder" class
 * exceeding maxD1/maxD2 (scalar energies E_Fc_rem/E_FcH_rem/...).
 *
 * The total exterior energy E_Fc decomposes into: the open chain (energy 0),
 * a structure whose exterior loop is a hairpin (E_FcH), an interior loop
 * (E_FcI), or a multiloop (E_FcM).  Each candidate decomposition is re-tested
 * against the stored energy and the first match is followed recursively; if
 * nothing matches, nrerror() aborts.
 *
 * Fixes vs. previous revision:
 *   - loopseq[] is now NUL-initialized; previously it was passed to
 *     E_Hairpin() uninitialized whenever the exterior hairpin size u >= 7
 *     (E_Hairpin appears to read the string only for u < 7 -- TODO confirm --
 *     but passing an uninitialized buffer is fragile and trips MSan);
 *   - corrected the misspelled diagnostic ("backtack" -> "backtrack").
 */
PRIVATE void backtrack_fc(int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int  d, i, j, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int           *my_iindx, energy, cnt1, cnt2, cnt3, cnt4;
  short         *S1;
  unsigned int  *referenceBPs1, *referenceBPs2;
  char          *sequence, *ptype;
  int           **E_Fc, **E_FcH, **E_FcI, **E_FcM, ***E_C, ***E_M, ***E_M2;
  int           *E_C_rem, *E_M_rem, *E_M2_rem, E_Fc_rem, E_FcH_rem, E_FcI_rem, E_FcM_rem;
  int           **l_min_values, **l_max_values, *k_min_values, *k_max_values;
  int           **l_min_values_m, **l_max_values_m, *k_min_values_m, *k_max_values_m;
  int           **l_min_values_m2, **l_max_values_m2, *k_min_values_m2, *k_max_values_m2;
  int           *l_min_values_fcH, *l_max_values_fcH, k_min_values_fcH, k_max_values_fcH;
  int           *l_min_values_fcI, *l_max_values_fcI, k_min_values_fcI, k_max_values_fcI;
  int           *l_min_values_fcM, *l_max_values_fcM, k_min_values_fcM, k_max_values_fcM;
  paramT        *P;

  /* unpack frequently used fields of the DP state into locals */
  P                 = vars->P;
  sequence          = vars->sequence;
  seq_length        = vars->seq_length;
  S1                = vars->S1;
  ptype             = vars->ptype;
  my_iindx          = vars->my_iindx;
  referenceBPs1     = vars->referenceBPs1;
  referenceBPs2     = vars->referenceBPs2;
  /* distances of the open chain [1..n] to both references */
  base_d1           = referenceBPs1[my_iindx[1]-seq_length];
  base_d2           = referenceBPs2[my_iindx[1]-seq_length];
  E_C               = vars->E_C;
  l_min_values      = vars->l_min_values;
  l_max_values      = vars->l_max_values;
  k_min_values      = vars->k_min_values;
  k_max_values      = vars->k_max_values;
  E_M               = vars->E_M;
  l_min_values_m    = vars->l_min_values_m;
  l_max_values_m    = vars->l_max_values_m;
  k_min_values_m    = vars->k_min_values_m;
  k_max_values_m    = vars->k_max_values_m;
  E_M2              = vars->E_M2;
  l_min_values_m2   = vars->l_min_values_m2;
  l_max_values_m2   = vars->l_max_values_m2;
  k_min_values_m2   = vars->k_min_values_m2;
  k_max_values_m2   = vars->k_max_values_m2;
  E_Fc              = vars->E_Fc;
  E_FcI             = vars->E_FcI;
  l_min_values_fcI  = vars->l_min_values_fcI;
  l_max_values_fcI  = vars->l_max_values_fcI;
  k_min_values_fcI  = vars->k_min_values_fcI;
  k_max_values_fcI  = vars->k_max_values_fcI;
  E_FcH             = vars->E_FcH;
  l_min_values_fcH  = vars->l_min_values_fcH;
  l_max_values_fcH  = vars->l_max_values_fcH;
  k_min_values_fcH  = vars->k_min_values_fcH;
  k_max_values_fcH  = vars->k_max_values_fcH;
  E_FcM             = vars->E_FcM;
  l_min_values_fcM  = vars->l_min_values_fcM;
  l_max_values_fcM  = vars->l_max_values_fcM;
  k_min_values_fcM  = vars->k_min_values_fcM;
  k_max_values_fcM  = vars->k_max_values_fcM;
  E_C_rem           = vars->E_C_rem;
  E_M_rem           = vars->E_M_rem;
  E_M2_rem          = vars->E_M2_rem;
  E_Fc_rem          = vars->E_Fc_rem;
  E_FcH_rem         = vars->E_FcH_rem;
  E_FcI_rem         = vars->E_FcI_rem;
  E_FcM_rem         = vars->E_FcM_rem;
  maxD1             = vars->maxD1;
  maxD2             = vars->maxD2;

  if(k == -1){
    /* check if mfe might be open chain */
    if(E_Fc_rem == 0)
      if((referenceBPs1[my_iindx[1]-seq_length] > maxD1) || (referenceBPs2[my_iindx[1]-seq_length] > maxD2))
        return;
    /* check for hairpin configurations: the exterior loop of the circular
     * structure is the hairpin closed by some pair (i,j) */
    if(E_Fc_rem == E_FcH_rem){
      for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij;
          int type, no_close;
          char loopseq[10];
          loopseq[0] = '\0'; /* keep defined; only filled for u < 7 below */
          i   = j-d+1;
          ij  = my_iindx[i]-j;
          u   = seq_length-j + i-1;
          if (u<TURN) continue;
          type = ptype[ij];
          no_close = (((type==3)||(type==4))&&no_closingGU);
          type = rtype[type];
          if (!type) continue;
          if(no_close) continue;
          d1 = base_d1 - referenceBPs1[ij];
          d2 = base_d2 - referenceBPs2[ij];
          if (u<7) {
            /* exterior hairpin sequence wraps around the origin */
            strcpy(loopseq, sequence+j-1);
            strncat(loopseq, sequence, i);
          }
          energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
          if(E_C_rem[ij] != INF){
            if(E_Fc_rem == (E_C_rem[ij] + energy)){
              backtrack_c(i, j, -1, -1, structure, vars);
              return;
            }
          }
          if(E_C[ij])
            for(cnt1 = k_min_values[ij];
                cnt1 <= k_max_values[ij];
                cnt1++)
              for(cnt2 = l_min_values[ij][cnt1];
                  cnt2 <= l_max_values[ij][cnt1];
                  cnt2 += 2)
                if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2))
                  if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + energy)){
                    backtrack_c(i, j, cnt1, cnt2, structure, vars);
                    return;
                  }
        }
    }
    /* check for interior loop configurations: exterior interior loop formed
     * by pairs (i,j) and (p,q) with p > j (wrapping around the origin) */
    if(E_Fc_rem == E_FcI_rem){
      for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij, p, q, pq;
          int type, type_2;
          i   = j-d+1;
          ij  = my_iindx[i]-j;
          u   = seq_length-j + i-1;
          if (u<TURN) continue;
          type = rtype[(unsigned int)ptype[ij]];
          if (!type) continue;
          for(p = j+1; p < seq_length ; p++){
            unsigned int u1, qmin, ln_pre;
            u1 = p-j-1;
            if (u1+i-1>MAXLOOP) break;
            qmin = p + TURN + 1;
            ln_pre = u1 + i + seq_length;
            if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
            for(q = qmin; q <= seq_length; q++){
              unsigned int u2;
              pq = my_iindx[p]-q;
              type_2 = rtype[(unsigned int)ptype[pq]];
              if (type_2==0) continue;
              u2 = i-1 + seq_length-q;
              if (u1+u2>MAXLOOP) continue;
              energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
              /* remainder (i,j) combined with regular or remainder (p,q) */
              if(E_C_rem[ij] != INF){
                if(E_C[pq])
                  for(cnt1 = k_min_values[pq];
                      cnt1 <= k_max_values[pq];
                      cnt1++)
                    for(cnt2 = l_min_values[pq][cnt1];
                        cnt2 <= l_max_values[pq][cnt1];
                        cnt2 += 2)
                      if(E_Fc_rem == (E_C_rem[ij] + E_C[pq][cnt1][cnt2/2] + energy)){
                        backtrack_c(i, j, -1, -1, structure, vars);
                        backtrack_c(p, q, cnt1, cnt2, structure, vars);
                        return;
                      }
                if(E_C_rem[pq] != INF){
                  if(E_Fc_rem == (E_C_rem[ij] + E_C_rem[pq] + energy)){
                    backtrack_c(i, j, -1, -1, structure, vars);
                    backtrack_c(p, q, -1, -1, structure, vars);
                    return;
                  }
                }
              }
              /* regular (i,j) combined with remainder (p,q) */
              if(E_C_rem[pq] != INF){
                if(E_C[ij])
                  for(cnt1 = k_min_values[ij];
                      cnt1 <= k_max_values[ij];
                      cnt1++)
                    for(cnt2 = l_min_values[ij][cnt1];
                        cnt2 <= l_max_values[ij][cnt1];
                        cnt2 += 2)
                      if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + E_C_rem[pq] + energy)){
                        backtrack_c(i, j, cnt1, cnt2, structure, vars);
                        backtrack_c(p, q, -1, -1, structure, vars);
                        return;
                      }
              }
              if(!(E_C[ij])) continue;
              if(!(E_C[pq])) continue;
              /* get distance to reference if closing the interior loop
               * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
               * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
               */
              d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
              d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
              for(cnt1 = k_min_values[ij];
                  cnt1 <= k_max_values[ij];
                  cnt1++)
                for(cnt2 = l_min_values[ij][cnt1];
                    cnt2 <= l_max_values[ij][cnt1];
                    cnt2 += 2)
                  for(cnt3 = k_min_values[pq];
                      cnt3 <= k_max_values[pq];
                      cnt3++)
                    for(cnt4 = l_min_values[pq][cnt3];
                        cnt4 <= l_max_values[pq][cnt3];
                        cnt4 += 2)
                      if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2))
                        if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + E_C[pq][cnt3][cnt4/2] + energy)){
                          backtrack_c(i, j, cnt1, cnt2, structure, vars);
                          backtrack_c(p, q, cnt3, cnt4, structure, vars);
                          return;
                        }
            } /* end for p */
          } /* end for q */
        }
    }
    /* check for multi loop configurations: M(1,i) + M2(i+1..) + MLclosing */
    if(E_Fc_rem == E_FcM_rem){
      if(seq_length > 2*TURN)
        for (i=TURN+1; i<seq_length-2*TURN; i++) {
          /* get distancies to references
           * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
           * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
           */
          if(E_M_rem[my_iindx[1]-i] != INF){
            if(E_M2[i+1])
              for(cnt1 = k_min_values_m2[i+1];
                  cnt1 <= k_max_values_m2[i+1];
                  cnt1++)
                for(cnt2 = l_min_values_m2[i+1][cnt1];
                    cnt2 <= l_max_values_m2[i+1][cnt1];
                    cnt2 += 2)
                  if(E_Fc_rem == (E_M_rem[my_iindx[1]-i] + E_M2[i+1][cnt1][cnt2/2] + P->MLclosing)){
                    backtrack_m(1, i, -1, -1, structure, vars);
                    backtrack_m2(i+1, cnt1, cnt2, structure, vars);
                    return;
                  }
            if(E_M2_rem[i+1] != INF){
              if(E_Fc_rem == (E_M_rem[my_iindx[1]-i] + E_M2_rem[i+1] + P->MLclosing)){
                backtrack_m(1, i, -1, -1, structure, vars);
                backtrack_m2(i+1, -1, -1, structure, vars);
                return;
              }
            }
          }
          if(E_M2_rem[i+1] != INF){
            if(E_M[my_iindx[1]-i])
              for(cnt1 = k_min_values_m[my_iindx[1]-i];
                  cnt1 <= k_max_values_m[my_iindx[1]-i];
                  cnt1++)
                for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
                    cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
                    cnt2 += 2)
                  if(E_Fc_rem == (E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2_rem[i+1] + P->MLclosing)){
                    backtrack_m(1, i, cnt1, cnt2, structure, vars);
                    backtrack_m2(i+1, -1, -1, structure, vars);
                    return;
                  }
          }
          /* both parts regular but combined distance overflows the bounds */
          if(!(E_M[my_iindx[1]-i])) continue;
          if(!(E_M2[i+1])) continue;
          d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
          d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
          for(cnt1 = k_min_values_m[my_iindx[1]-i];
              cnt1 <= k_max_values_m[my_iindx[1]-i];
              cnt1++)
            for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
                cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
                cnt2 += 2)
              for(cnt3 = k_min_values_m2[i+1];
                  cnt3 <= k_max_values_m2[i+1];
                  cnt3++)
                for(cnt4 = l_min_values_m2[i+1][cnt3];
                    cnt4 <= l_max_values_m2[i+1][cnt3];
                    cnt4 += 2)
                  if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
                    if(E_Fc_rem == (E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2[i+1][cnt3][cnt4/2] + P->MLclosing)){
                      backtrack_m(1, i, cnt1, cnt2, structure, vars);
                      backtrack_m2(i+1, cnt3, cnt4, structure, vars);
                      return;
                    }
                  }
        }
    }
  }
  else{
    /* open chain ? */
    if(E_Fc[k][l/2] == 0)
      if((k == referenceBPs1[my_iindx[1]-seq_length]) && (l == referenceBPs2[my_iindx[1]-seq_length])){
        return;
      }
    /* exterior hairpin */
    if((k >= k_min_values_fcH) && (k <= k_max_values_fcH)){
      if((l >= l_min_values_fcH[k]) && (l <= l_max_values_fcH[k]))
        if(E_Fc[k][l/2] == E_FcH[k][l/2]){
          for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij;
              int type, no_close;
              char loopseq[10];
              loopseq[0] = '\0'; /* keep defined; only filled for u < 7 below */
              i   = j-d+1;
              ij  = my_iindx[i]-j;
              if (!E_C[ij]) continue;
              u   = seq_length-j + i-1;
              if (u<TURN) continue;
              type = ptype[ij];
              no_close = (((type==3)||(type==4))&&no_closingGU);
              type = rtype[type];
              if (!type) continue;
              if(no_close) continue;
              d1 = base_d1 - referenceBPs1[ij];
              d2 = base_d2 - referenceBPs2[ij];
              if (u<7) {
                strcpy(loopseq, sequence+j-1);
                strncat(loopseq, sequence, i);
              }
              energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
              if((k >= d1) && (l >= d2))
                if((k-d1 >= k_min_values[ij]) && (k-d1 <= k_max_values[ij]))
                  if((l-d2 >= l_min_values[ij][k-d1]) && (l-d2 <= l_max_values[ij][k-d1])){
                    if(E_Fc[k][l/2] == E_C[ij][k-d1][(l-d2)/2] + energy){
                      backtrack_c(i, j, k-d1, l-d2, structure, vars);
                      return;
                    }
                  }
            }
        }
    }
    /* exterior interior loop */
    if((k >= k_min_values_fcI) && (k <= k_max_values_fcI)){
      if((l >= l_min_values_fcI[k]) && (l <= l_max_values_fcI[k]))
        if(E_Fc[k][l/2] == E_FcI[k][l/2]){
          for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij, p, q, pq;
              int type, type_2;
              i   = j-d+1;
              ij  = my_iindx[i]-j;
              if(!E_C[ij]) continue;
              u   = seq_length-j + i-1;
              if (u<TURN) continue;
              type = ptype[ij];
              type = rtype[type];
              if (!type) continue;
              for(p = j+1; p < seq_length ; p++){
                unsigned int u1, qmin, ln_pre;
                u1 = p-j-1;
                if (u1+i-1>MAXLOOP) break;
                qmin = p + TURN + 1;
                ln_pre = u1 + i + seq_length;
                if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
                for(q = qmin; q <= seq_length; q++){
                  unsigned int u2;
                  pq = my_iindx[p]-q;
                  if(!E_C[pq]) continue;
                  type_2 = rtype[(unsigned int)ptype[pq]];
                  if (type_2==0) continue;
                  u2 = i-1 + seq_length-q;
                  if (u1+u2>MAXLOOP) continue;
                  /* get distance to reference if closing the interior loop
                   * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
                   * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
                   */
                  d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                  d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                  energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
                  if((k >= d1) && (l >= d2))
                    for(cnt1 = k_min_values[ij]; cnt1 <= MIN2(k_max_values[ij], k - d1); cnt1++)
                      for(cnt2 = l_min_values[ij][cnt1]; cnt2 <= MIN2(l_max_values[ij][cnt1], l - d2); cnt2+=2)
                        if((k - d1 - cnt1 >= k_min_values[pq]) && (k - d1 - cnt1 <= k_max_values[pq]))
                          if((l - d2 - cnt2 >= l_min_values[pq][k-d1-cnt1]) && (l - d2 - cnt2 <= l_max_values[pq][k-d1-cnt1])){
                            if((E_C[ij][cnt1][cnt2/2] + E_C[pq][k-d1-cnt1][(l-d2-cnt2)/2] + energy) == E_Fc[k][l/2]){
                              backtrack_c(i, j, cnt1, cnt2, structure, vars);
                              backtrack_c(p, q, k - d1 - cnt1, l - d2 - cnt2, structure, vars);
                              return;
                            }
                          }
                }
              }
            }
        }
    }
    /* exterior multiloop */
    if((k >= k_min_values_fcM) && (k <= k_max_values_fcM)){
      if((l >= l_min_values_fcM[k]) && (l <= l_max_values_fcM[k]))
        if(E_Fc[k][l/2] == E_FcM[k][l/2]){
          if(seq_length > 2*TURN)
            for (i=TURN+1; i<seq_length-2*TURN; i++) {
              /* get distancies to references
               * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
               * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
               */
              if(!E_M[my_iindx[1]-i]) continue;
              if(!E_M2[i+1]) continue;
              d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
              d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
              if((k >= d1) && (l >= d2))
                for(cnt1 = k_min_values_m[my_iindx[1]-i]; cnt1 <= MIN2(k_max_values_m[my_iindx[1]-i], k-d1); cnt1++)
                  for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1]; cnt2 <= MIN2(l_max_values_m[my_iindx[1]-i][cnt1], l-d2); cnt2+=2)
                    if((k - d1 - cnt1 >= k_min_values_m2[i+1]) && (k - d1 - cnt1 <= k_max_values_m2[i+1]))
                      if((l - d2 - cnt2 >= l_min_values_m2[i+1][k-d1-cnt1]) && (l - d2 - cnt2 <= l_max_values_m2[i+1][k-d1-cnt1]))
                        if((E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2[i+1][k-d1-cnt1][(l-d2-cnt2)/2] + P->MLclosing) == E_FcM[k][l/2]){
                          backtrack_m(1, i, cnt1, cnt2, structure, vars);
                          backtrack_m2(i+1, k - d1 - cnt1, l - d2 - cnt2, structure, vars);
                          return;
                        }
            }
        }
    }
  }
  nrerror("backtrack failed in fc\n");
}
/*
 * Backtrack one decomposition of the M2 part for the interval [i..n]:
 * a split into two M1 segments [i..j] and [j+1..n] whose combined
 * base-pair distances to the two reference structures equal (k, l).
 *
 * k == -1 encodes the "rem"(ainder) case: distance classes that exceeded
 * maxD1/maxD2 and were collapsed into the scalar E_M2_rem[i]; in that case
 * the partner segment may come either from another rem entry or from a
 * regular distance-class entry whose combined distance is out of bounds.
 *
 * The dot-bracket result is written into `structure` by the recursive
 * backtrack_m1() calls. Aborts via nrerror() if no split reproduces the
 * stored energy.
 */
PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int j, ij, j3, n;
  unsigned int *referenceBPs1, *referenceBPs2;
  unsigned int d1, d2, base_d1, base_d2, maxD1, maxD2;
  int *my_iindx, cnt1, cnt2, cnt3, cnt4;
  int ***E_M1, ***E_M2, *E_M2_rem, *E_M1_rem, e;
  int **l_min_values_m1, **l_max_values_m1, *k_min_values_m1, *k_max_values_m1;
  n = vars->seq_length;
  my_iindx = vars->my_iindx;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  E_M1 = vars->E_M1;
  l_min_values_m1 = vars->l_min_values_m1;
  l_max_values_m1 = vars->l_max_values_m1;
  k_min_values_m1 = vars->k_min_values_m1;
  k_max_values_m1 = vars->k_max_values_m1;
  E_M1_rem = vars->E_M1_rem;
  E_M2 = vars->E_M2;
  E_M2_rem = vars->E_M2_rem;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;
  /* distances of the full interval [i..n] to both references */
  base_d1 = referenceBPs1[my_iindx[i]-n];
  base_d2 = referenceBPs2[my_iindx[i]-n];
  if(k == -1){
    /* rem case: find any split whose energies sum to E_M2_rem[i] */
    e = E_M2_rem[i];
    for (j=i+TURN+1; j<n-TURN-1; j++){
      if(E_M1_rem[my_iindx[i]-j] != INF){
        /* rem [i..j] combined with a regular [j+1..n] entry */
        if(E_M1[my_iindx[j+1]-n])
          for(cnt1 = k_min_values_m1[my_iindx[j+1]-n];
              cnt1 <= k_max_values_m1[my_iindx[j+1]-n];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[j+1]-n][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[j+1]-n][cnt1];
                cnt2++)
              if(e == E_M1_rem[my_iindx[i]-j] + E_M1[my_iindx[j+1]-n][cnt1][cnt2/2]){
                /* k,l are still -1 here, signalling the rem case downstream */
                backtrack_m1(i, j, k, l, structure, vars);
                backtrack_m1(j+1, n, cnt1, cnt2, structure, vars);
                return;
              }
        /* rem [i..j] combined with rem [j+1..n] */
        if(E_M1_rem[my_iindx[j+1]-n] != INF){
          if(e == E_M1_rem[my_iindx[i]-j] + E_M1_rem[my_iindx[j+1]-n]){
            backtrack_m1(i, j, k, l, structure, vars);
            backtrack_m1(j+1, n, k, l, structure, vars);
            return;
          }
        }
      }
      /* regular [i..j] entry combined with rem [j+1..n] */
      if(E_M1_rem[my_iindx[j+1]-n] != INF){
        if(E_M1[my_iindx[i]-j])
          for(cnt1 = k_min_values_m1[my_iindx[i]-j];
              cnt1 <= k_max_values_m1[my_iindx[i]-j];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1];
                cnt2 += 2)
              if(e == E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1_rem[my_iindx[j+1]-n]){
                backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                backtrack_m1(j+1, n, k, l, structure, vars);
                return;
              }
      }
      /* both parts regular, but combined class exceeds maxD1/maxD2 */
      if(!E_M1[my_iindx[i]-j]) continue;
      if(!E_M1[my_iindx[j+1]-n]) continue;
      d1 = referenceBPs1[my_iindx[i]-n] - referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[j+1]-n];
      d2 = referenceBPs2[my_iindx[i]-n] - referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[j+1]-n];
      for(cnt1 = k_min_values_m1[my_iindx[i]-j]; cnt1 <= k_max_values_m1[my_iindx[i]-j]; cnt1++)
        for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1]; cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1]; cnt2+=2){
          for(cnt3 = k_min_values_m1[my_iindx[j+1]-n]; cnt3 <= k_max_values_m1[my_iindx[j+1]-n]; cnt3++)
            for(cnt4 = l_min_values_m1[my_iindx[j+1]-n][cnt3]; cnt4 <= l_max_values_m1[my_iindx[j+1]-n][cnt3]; cnt4+=2){
              /* only classes that fell out of the tracked distance range */
              if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
                if(e == E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-n][cnt3][cnt4/2]){
                  backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                  backtrack_m1(j+1, n, cnt3, cnt4, structure, vars);
                  return;
                }
              }
            }
        }
    }
  }
  else{
    /* regular case: both parts must add up exactly to class (k, l) */
    for(j=i+TURN+1; j<n-TURN-1; j++){
      if(!E_M1[my_iindx[i]-j]) continue;
      if(!E_M1[my_iindx[j+1]-n]) continue;
      ij = my_iindx[i]-j;
      j3 = my_iindx[j+1]-n;
      /* distance contribution of splitting [i..n] at j */
      d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[j3];
      d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[j3];
      for(cnt1 = k_min_values_m1[ij]; cnt1 <= MIN2(k_max_values_m1[ij], k - d1); cnt1++)
        for(cnt2 = l_min_values_m1[ij][cnt1]; cnt2 <= MIN2(l_max_values_m1[ij][cnt1], l-d2); cnt2+=2)
          if((k - d1 - cnt1 >= k_min_values_m1[j3]) && (k - d1 - cnt1 <= k_max_values_m1[j3]))
            if((l - d2 - cnt2 >= l_min_values_m1[j3][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_values_m1[j3][k-d1-cnt1]))
              if(E_M1[ij][cnt1][cnt2/2] + E_M1[j3][k-d1-cnt1][(l-d2-cnt2)/2] == E_M2[i][k][l/2]){
                backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                backtrack_m1(j+1, n, k-d1-cnt1, l-d2-cnt2, structure, vars);
                return;
              }
    }
  }
  /* NOTE(review): "backtack" typo kept — runtime string must stay byte-identical */
  nrerror("backtack failed in m2\n");
}
/*
 * Fill the distance-class partitioned MFE matrices for CIRCULAR RNA folding:
 *
 *   1. E_M2[i]  — intervals [i..n] containing at least two M1 components
 *                 (needed for the circular multiloop decomposition),
 *   2. E_FcH    — circular structures closed by a hairpin-like exterior loop,
 *   3. E_FcI    — circular structures closed by an interior-like exterior loop,
 *   4. E_FcM    — circular structures closed by an exterior multiloop,
 *   5. E_Fc     — the minimum over (2)-(4) plus the unfolded-chain case.
 *
 * Every matrix is indexed by the base-pair distance pair (k, l) to the two
 * reference structures; classes whose distance exceeds maxD1/maxD2 are
 * collapsed into the scalar *_rem counterparts. The posterior min/max
 * boundary trackers shrink each matrix to the classes actually populated.
 */
PRIVATE void mfe_circ(TwoDfold_vars *vars){
  unsigned int d, i, j, maxD1, maxD2, seq_length, *referenceBPs1, *referenceBPs2, d1, d2, base_d1, base_d2, *mm1, *mm2, *bpdist;
  int *my_iindx, energy, cnt1, cnt2, cnt3, cnt4;
  short *S1;
  char *sequence, *ptype;
  int ***E_C, ***E_M, ***E_M1;
  int *E_C_rem, *E_M_rem, *E_M1_rem;
  int **l_min_values, **l_max_values, **l_min_values_m, **l_max_values_m, **l_min_values_m1, **l_max_values_m1;
  int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1;
  paramT *P;
  /* unpack the working variables from the model container */
  P = vars->P;
  sequence = vars->sequence;
  seq_length = vars->seq_length;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;
  S1 = vars->S1;
  ptype = vars->ptype;
  my_iindx = vars->my_iindx;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  mm1 = vars->mm1;
  mm2 = vars->mm2;
  bpdist = vars->bpdist;
  E_C = vars->E_C;
  l_min_values = vars->l_min_values;
  l_max_values = vars->l_max_values;
  k_min_values = vars->k_min_values;
  k_max_values = vars->k_max_values;
  E_M = vars->E_M;
  l_min_values_m = vars->l_min_values_m;
  l_max_values_m = vars->l_max_values_m;
  k_min_values_m = vars->k_min_values_m;
  k_max_values_m = vars->k_max_values_m;
  E_M1 = vars->E_M1;
  l_min_values_m1 = vars->l_min_values_m1;
  l_max_values_m1 = vars->l_max_values_m1;
  k_min_values_m1 = vars->k_min_values_m1;
  k_max_values_m1 = vars->k_max_values_m1;
  E_C_rem = vars->E_C_rem;
  E_M_rem = vars->E_M_rem;
  E_M1_rem = vars->E_M1_rem;
  /* --- phase 1: fill E_M2[i] for every start position i (parallel over i) --- */
#ifdef _OPENMP
  #pragma omp parallel for private(d1,d2,cnt1,cnt2,cnt3,cnt4,j, i)
#endif
  for(i=1; i<seq_length-TURN-1; i++){
    /* guess memory requirements for M2 */
    int min_k, max_k, max_l, min_l;
    int min_k_real, max_k_real, *min_l_real, *max_l_real;
    min_k = min_l = 0;
    /* upper bounds from maximum matching + reference pair counts */
    max_k = mm1[my_iindx[i]-seq_length] + referenceBPs1[my_iindx[i] - seq_length];
    max_l = mm2[my_iindx[i]-seq_length] + referenceBPs2[my_iindx[i] - seq_length];
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[i] - seq_length],
                      &vars->k_min_values_m2[i],
                      &vars->k_max_values_m2[i],
                      &vars->l_min_values_m2[i],
                      &vars->l_max_values_m2[i]
                      );
    prepareArray( &vars->E_M2[i],
                  vars->k_min_values_m2[i],
                  vars->k_max_values_m2[i],
                  vars->l_min_values_m2[i],
                  vars->l_max_values_m2[i]
                );
    preparePosteriorBoundaries( vars->k_max_values_m2[i] - vars->k_min_values_m2[i] + 1,
                                vars->k_min_values_m2[i],
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                              );
    /* begin filling of M2 array */
    for (j=i+TURN+1; j<seq_length-TURN-1; j++){
      /* rem [i..j] with regular or rem [j+1..n] */
      if(E_M1_rem[my_iindx[i]-j] != INF){
        if(E_M1[my_iindx[j+1]-seq_length])
          for(cnt1 = k_min_values_m1[my_iindx[j+1]-seq_length];
              cnt1 <= k_max_values_m1[my_iindx[j+1]-seq_length];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[j+1]-seq_length][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[j+1]-seq_length][cnt1];
                cnt2++)
              vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
                                       E_M1_rem[my_iindx[i]-j] + E_M1[my_iindx[j+1]-seq_length][cnt1][cnt2/2]
                                       );
        if(E_M1_rem[my_iindx[j+1]-seq_length] != INF)
          vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i], E_M1_rem[my_iindx[i]-j] + E_M1_rem[my_iindx[j+1]-seq_length]);
      }
      /* regular [i..j] with rem [j+1..n] */
      if(E_M1_rem[my_iindx[j+1]-seq_length] != INF){
        if(E_M1[my_iindx[i]-j])
          for(cnt1 = k_min_values_m1[my_iindx[i]-j];
              cnt1 <= k_max_values_m1[my_iindx[i]-j];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1];
                cnt2 += 2)
              vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
                                       E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1_rem[my_iindx[j+1]-seq_length]
                                       );
      }
      /* both parts regular: route result into the tracked class or into rem */
      if(!E_M1[my_iindx[i]-j]) continue;
      if(!E_M1[my_iindx[j+1]-seq_length]) continue;
      d1 = referenceBPs1[my_iindx[i]-seq_length] - referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[j+1]-seq_length];
      d2 = referenceBPs2[my_iindx[i]-seq_length] - referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[j+1]-seq_length];
      for(cnt1 = k_min_values_m1[my_iindx[i]-j]; cnt1 <= k_max_values_m1[my_iindx[i]-j]; cnt1++)
        for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1]; cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1]; cnt2+=2){
          for(cnt3 = k_min_values_m1[my_iindx[j+1]-seq_length]; cnt3 <= k_max_values_m1[my_iindx[j+1]-seq_length]; cnt3++)
            for(cnt4 = l_min_values_m1[my_iindx[j+1]-seq_length][cnt3]; cnt4 <= l_max_values_m1[my_iindx[j+1]-seq_length][cnt3]; cnt4+=2){
              if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
                vars->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2( vars->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
                                                                              E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-seq_length][cnt3][cnt4/2]
                                                                            );
                updatePosteriorBoundaries(cnt1+cnt3+d1,
                                          cnt2+cnt4+d2,
                                          &min_k_real,
                                          &max_k_real,
                                          &min_l_real,
                                          &max_l_real
                                          );
              }
              else{
                vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
                                         E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-seq_length][cnt3][cnt4/2]
                                         );
              }
            }
        }
    }
    /* resize and move memory portions of energy matrix E_M2 */
    adjustArrayBoundaries(&vars->E_M2[i],
                          &vars->k_min_values_m2[i],
                          &vars->k_max_values_m2[i],
                          &vars->l_min_values_m2[i],
                          &vars->l_max_values_m2[i],
                          min_k_real,
                          max_k_real,
                          min_l_real,
                          max_l_real
                          );
  } /* end for i */
  /* distances of the whole sequence to both references */
  base_d1 = referenceBPs1[my_iindx[1]-seq_length];
  base_d2 = referenceBPs2[my_iindx[1]-seq_length];
  /* guess memory requirements for E_FcH, E_FcI and E_FcM */
  int min_k, max_k, max_l, min_l;
  int min_k_real, max_k_real, min_k_real_fcH, max_k_real_fcH, min_k_real_fcI, max_k_real_fcI, min_k_real_fcM, max_k_real_fcM;
  int *min_l_real, *max_l_real, *min_l_real_fcH, *max_l_real_fcH, *min_l_real_fcI, *max_l_real_fcI,*min_l_real_fcM, *max_l_real_fcM;
  min_k = min_l = 0;
  max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
  max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];
  /* allocate the four Fc matrices and their boundary trackers, one omp
   * section each so the allocations may proceed concurrently */
#ifdef _OPENMP
  #pragma omp sections
  {
  #pragma omp section
  {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &vars->k_min_values_fc,
                    &vars->k_max_values_fc,
                    &vars->l_min_values_fc,
                    &vars->l_max_values_fc
                    );
  prepareArray( &vars->E_Fc,
                vars->k_min_values_fc,
                vars->k_max_values_fc,
                vars->l_min_values_fc,
                vars->l_max_values_fc
              );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &vars->k_min_values_fcH,
                    &vars->k_max_values_fcH,
                    &vars->l_min_values_fcH,
                    &vars->l_max_values_fcH
                    );
  prepareArray( &vars->E_FcH,
                vars->k_min_values_fcH,
                vars->k_max_values_fcH,
                vars->l_min_values_fcH,
                vars->l_max_values_fcH
              );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &vars->k_min_values_fcI,
                    &vars->k_max_values_fcI,
                    &vars->l_min_values_fcI,
                    &vars->l_max_values_fcI
                    );
  prepareArray( &vars->E_FcI,
                vars->k_min_values_fcI,
                vars->k_max_values_fcI,
                vars->l_min_values_fcI,
                vars->l_max_values_fcI
              );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &vars->k_min_values_fcM,
                    &vars->k_max_values_fcM,
                    &vars->l_min_values_fcM,
                    &vars->l_max_values_fcM
                    );
  prepareArray( &vars->E_FcM,
                vars->k_min_values_fcM,
                vars->k_max_values_fcM,
                vars->l_min_values_fcM,
                vars->l_max_values_fcM
              );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  preparePosteriorBoundaries( max_k - min_k + 1,
                              min_k,
                              &min_k_real,
                              &max_k_real,
                              &min_l_real,
                              &max_l_real
                            );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  preparePosteriorBoundaries( max_k - min_k + 1,
                              min_k,
                              &min_k_real_fcH,
                              &max_k_real_fcH,
                              &min_l_real_fcH,
                              &max_l_real_fcH
                            );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  preparePosteriorBoundaries( max_k - min_k + 1,
                              min_k,
                              &min_k_real_fcI,
                              &max_k_real_fcI,
                              &min_l_real_fcI,
                              &max_l_real_fcI
                            );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  preparePosteriorBoundaries( max_k - min_k + 1,
                              min_k,
                              &min_k_real_fcM,
                              &max_k_real_fcM,
                              &min_l_real_fcM,
                              &max_l_real_fcM
                            );
#ifdef _OPENMP
  }
  }
#endif
  /* begin actual energy calculations */
#ifdef _OPENMP
  #pragma omp sections private(d, d1,d2,cnt1,cnt2,cnt3,cnt4,j, i, energy)
  {
  #pragma omp section
  {
#endif
  /* --- phase 2: E_FcH — exterior loop closed like a hairpin --- */
  for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
    for (j = d; j <= seq_length; j++) {
      unsigned int u, ij;
      int type, no_close;
      char loopseq[10];
      i = j-d+1;
      ij = my_iindx[i]-j;
      u = seq_length-j + i-1;  /* exterior (unpaired) loop size */
      if (u<TURN) continue;
      type = ptype[ij];
      no_close = (((type==3)||(type==4))&&no_closingGU);
      type=rtype[type];  /* pair seen from outside the circle */
      if (!type) continue;
      if(no_close) continue;
      d1 = base_d1 - referenceBPs1[ij];
      d2 = base_d2 - referenceBPs2[ij];
      if (u<7) {
        /* small loops need the wrapped sequence for tetraloop lookup;
         * for u>=7 loopseq stays uninitialized but is presumably not read
         * by E_Hairpin — TODO confirm against E_Hairpin implementation */
        strcpy(loopseq , sequence+j-1);
        strncat(loopseq, sequence, i);
      }
      energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
      if(E_C_rem[ij] != INF)
        vars->E_FcH_rem = MIN2(vars->E_FcH_rem, E_C_rem[ij] + energy);
      if (!E_C[ij]) continue;
      for(cnt1 = k_min_values[ij]; cnt1 <= k_max_values[ij]; cnt1++)
        for(cnt2 = l_min_values[ij][cnt1]; cnt2 <= l_max_values[ij][cnt1]; cnt2 += 2){
          if(((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)){
            vars->E_FcH[cnt1 + d1][(cnt2+d2)/2] = MIN2( vars->E_FcH[cnt1 + d1][(cnt2+d2)/2],
                                                        energy + E_C[ij][cnt1][cnt2/2]
                                                      );
            updatePosteriorBoundaries(cnt1 + d1,
                                      cnt2 + d2,
                                      &min_k_real_fcH,
                                      &max_k_real_fcH,
                                      &min_l_real_fcH,
                                      &max_l_real_fcH
                                      );
          }
          else
            vars->E_FcH_rem = MIN2(vars->E_FcH_rem, energy + E_C[ij][cnt1][cnt2/2]);
        }
    }
  /* end of i-j loop */
  /* resize and move memory portions of energy matrix E_FcH */
  adjustArrayBoundaries(&vars->E_FcH,
                        &vars->k_min_values_fcH,
                        &vars->k_max_values_fcH,
                        &vars->l_min_values_fcH,
                        &vars->l_max_values_fcH,
                        min_k_real_fcH,
                        max_k_real_fcH,
                        min_l_real_fcH,
                        max_l_real_fcH
                        );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  /* --- phase 3: E_FcI — exterior loop closed like an interior loop,
   * formed by the two pairs (i,j) and (p,q) --- */
  for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
    for (j = d; j <= seq_length; j++) {
      unsigned int u, ij, p, q, pq;
      int type, type_2, no_close;
      i = j-d+1;
      ij = my_iindx[i]-j;
      u = seq_length-j + i-1;
      if (u<TURN) continue;
      type = ptype[ij];
      no_close = (((type==3)||(type==4))&&no_closingGU);
      type=rtype[type];
      if (!type) continue;
      if(no_close) continue;
      if(E_C_rem[ij] != INF){
        /* rem class of (i,j) paired with any class of (p,q) */
        for(p = j+1; p < seq_length ; p++){
          unsigned int u1, qmin, ln_pre;
          u1 = p-j-1;  /* unpaired stretch between j and p */
          if (u1+i-1>MAXLOOP) break;
          qmin = p + TURN + 1;
          ln_pre = u1 + i + seq_length;
          if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
          for(q = qmin; q <= seq_length; q++){
            unsigned int u2;
            pq = my_iindx[p]-q;
            type_2 = rtype[(unsigned int)ptype[pq]];
            if (type_2==0) continue;
            u2 = i-1 + seq_length-q;  /* unpaired stretch wrapping the origin */
            if (u1+u2>MAXLOOP) continue;
            /* get distance to reference if closing the interior loop
             * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
             * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
             */
            d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
            if(E_C_rem[pq] != INF)
              vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C_rem[ij] + E_C_rem[pq] + energy);
            if(E_C[pq])
              for(cnt1 = k_min_values[pq];
                  cnt1 <= k_max_values[pq];
                  cnt1++)
                for(cnt2 = l_min_values[pq][cnt1];
                    cnt2 <= l_max_values[pq][cnt1];
                    cnt2 += 2)
                  vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C_rem[ij] + E_C[pq][cnt1][cnt2/2] + energy);
          }
        }
      }
      if(E_C[ij]){
        /* regular classes of (i,j) with rem or regular classes of (p,q) */
        for(p = j+1; p < seq_length ; p++){
          unsigned int u1, qmin, ln_pre;
          u1 = p-j-1;
          if (u1+i-1>MAXLOOP) break;
          qmin = p + TURN + 1;
          ln_pre = u1 + i + seq_length;
          if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
          for(q = qmin; q <= seq_length; q++){
            unsigned int u2;
            pq = my_iindx[p]-q;
            type_2 = rtype[(unsigned int)ptype[pq]];
            if (type_2==0) continue;
            u2 = i-1 + seq_length-q;
            if (u1+u2>MAXLOOP) continue;
            /* get distance to reference if closing the interior loop
             * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
             * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
             */
            d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
            if(E_C_rem[pq] != INF){
              for(cnt1 = k_min_values[ij];
                  cnt1 <= k_max_values[ij];
                  cnt1++)
                for(cnt2 = l_min_values[ij][cnt1];
                    cnt2 <= l_max_values[ij][cnt1];
                    cnt2 += 2)
                  vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C[ij][cnt1][cnt2/2] + E_C_rem[pq] + energy);
            }
            if(E_C[pq])
              for(cnt1 = k_min_values[ij];
                  cnt1 <= k_max_values[ij];
                  cnt1++)
                for(cnt2 = l_min_values[ij][cnt1];
                    cnt2 <= l_max_values[ij][cnt1];
                    cnt2 += 2)
                  for(cnt3 = k_min_values[pq];
                      cnt3 <= k_max_values[pq];
                      cnt3++)
                    for(cnt4 = l_min_values[pq][cnt3];
                        cnt4 <= l_max_values[pq][cnt3];
                        cnt4 += 2){
                      if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
                        vars->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2(
                                                                                    vars->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
                                                                                    E_C[ij][cnt1][cnt2/2]
                                                                                    + E_C[pq][cnt3][cnt4/2]
                                                                                    + energy
                                                                                  );
                        updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                                  cnt2 + cnt4 + d2,
                                                  &min_k_real_fcI,
                                                  &max_k_real_fcI,
                                                  &min_l_real_fcI,
                                                  &max_l_real_fcI
                                                  );
                      }
                      else{
                        vars->E_FcI_rem = MIN2(
                                                vars->E_FcI_rem,
                                                E_C[ij][cnt1][cnt2/2]
                                                + E_C[pq][cnt3][cnt4/2]
                                                + energy
                                              );
                      }
                    }
          }
        }
      }
    }
  /* end of i-j loop */
  /* resize and move memory portions of energy matrix E_FcI */
  adjustArrayBoundaries(&vars->E_FcI,
                        &vars->k_min_values_fcI,
                        &vars->k_max_values_fcI,
                        &vars->l_min_values_fcI,
                        &vars->l_max_values_fcI,
                        min_k_real_fcI,
                        max_k_real_fcI,
                        min_l_real_fcI,
                        max_l_real_fcI
                        );
#ifdef _OPENMP
  }
  #pragma omp section
  {
#endif
  /* --- phase 4: E_FcM — exterior multiloop: M part [1..i] + M2 part [i+1..n] --- */
  if(seq_length > 2*TURN){
    for (i=TURN+1; i<seq_length-2*TURN; i++) {
      /* get distancies to references
       * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
       * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
       */
      d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
      d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
      if(E_M_rem[my_iindx[1]-i] != INF){
        if(vars->E_M2[i+1])
          for(cnt1 = vars->k_min_values_m2[i+1];
              cnt1 <= vars->k_max_values_m2[i+1];
              cnt1++)
            for(cnt2 = vars->l_min_values_m2[i+1][cnt1];
                cnt2 <= vars->l_max_values_m2[i+1][cnt1];
                cnt2 += 2)
              vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M_rem[my_iindx[1]-i] + vars->E_M2[i+1][cnt1][cnt2/2] + P->MLclosing);
        if(vars->E_M2_rem[i+1] != INF)
          vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M_rem[my_iindx[1]-i] + vars->E_M2_rem[i+1] + P->MLclosing);
      }
      if(vars->E_M2_rem[i+1] != INF){
        if(E_M[my_iindx[1]-i])
          for(cnt1 = k_min_values_m[my_iindx[1]-i];
              cnt1 <= k_max_values_m[my_iindx[1]-i];
              cnt1++)
            for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
                cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
                cnt2 += 2)
              vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M[my_iindx[1]-i][cnt1][cnt2/2] + vars->E_M2_rem[i+1] + P->MLclosing);
      }
      if(!E_M[my_iindx[1]-i]) continue;
      if(!vars->E_M2[i+1]) continue;
      for(cnt1 = k_min_values_m[my_iindx[1]-i]; cnt1 <= k_max_values_m[my_iindx[1]-i]; cnt1++)
        for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1]; cnt2 += 2)
          for(cnt3 = vars->k_min_values_m2[i+1]; cnt3 <= vars->k_max_values_m2[i+1]; cnt3++)
            for(cnt4 = vars->l_min_values_m2[i+1][cnt3]; cnt4 <= vars->l_max_values_m2[i+1][cnt3]; cnt4 += 2){
              if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
                vars->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2(
                                                                            vars->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
                                                                            E_M[my_iindx[1]-i][cnt1][cnt2/2]
                                                                            + vars->E_M2[i+1][cnt3][cnt4/2]
                                                                            + P->MLclosing
                                                                          );
                updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                          cnt2 + cnt4 + d2,
                                          &min_k_real_fcM,
                                          &max_k_real_fcM,
                                          &min_l_real_fcM,
                                          &max_l_real_fcM
                                          );
              }
              else{
                vars->E_FcM_rem = MIN2(
                                        vars->E_FcM_rem,
                                        E_M[my_iindx[1]-i][cnt1][cnt2/2]
                                        + vars->E_M2[i+1][cnt3][cnt4/2]
                                        + P->MLclosing
                                      );
              }
            }
    }
  }
  /* resize and move memory portions of energy matrix E_FcM */
  adjustArrayBoundaries(&vars->E_FcM,
                        &vars->k_min_values_fcM,
                        &vars->k_max_values_fcM,
                        &vars->l_min_values_fcM,
                        &vars->l_max_values_fcM,
                        min_k_real_fcM,
                        max_k_real_fcM,
                        min_l_real_fcM,
                        max_l_real_fcM
                        );
#ifdef _OPENMP
  }
  }
#endif
  /* --- phase 5: combine H/I/M contributions into E_Fc --- */
  /* compute E_Fc_rem */
  vars->E_Fc_rem = MIN2(vars->E_FcH_rem, vars->E_FcI_rem);
  vars->E_Fc_rem = MIN2(vars->E_Fc_rem, vars->E_FcM_rem);
  /* add the case were structure is unfolded chain */
  if((referenceBPs1[my_iindx[1]-seq_length] > maxD1) || (referenceBPs2[my_iindx[1]-seq_length] > maxD2))
    vars->E_Fc_rem = MIN2(vars->E_Fc_rem, 0);
  /* compute all E_Fc */
  for(cnt1 = vars->k_min_values_fcH; cnt1 <= vars->k_max_values_fcH; cnt1++)
    for(cnt2 = vars->l_min_values_fcH[cnt1]; cnt2 <= vars->l_max_values_fcH[cnt1]; cnt2 += 2){
      vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
                                      vars->E_FcH[cnt1][cnt2/2]
                                      );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  for(cnt1 = vars->k_min_values_fcI; cnt1 <= vars->k_max_values_fcI; cnt1++)
    for(cnt2 = vars->l_min_values_fcI[cnt1]; cnt2 <= vars->l_max_values_fcI[cnt1]; cnt2 += 2){
      vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
                                      vars->E_FcI[cnt1][cnt2/2]
                                      );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  for(cnt1 = vars->k_min_values_fcM; cnt1 <= vars->k_max_values_fcM; cnt1++)
    for(cnt2 = vars->l_min_values_fcM[cnt1]; cnt2 <= vars->l_max_values_fcM[cnt1]; cnt2 += 2){
      vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
                                      vars->E_FcM[cnt1][cnt2/2]
                                      );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  /* add the case were structure is unfolded chain */
  vars->E_Fc[referenceBPs1[my_iindx[1]-seq_length]][referenceBPs2[my_iindx[1]-seq_length]/2] = MIN2(vars->E_Fc[referenceBPs1[my_iindx[1]-seq_length]][referenceBPs2[my_iindx[1]-seq_length]/2],
                                                                                                    0);
  updatePosteriorBoundaries(referenceBPs1[my_iindx[1]-seq_length],
                            referenceBPs2[my_iindx[1]-seq_length],
                            &min_k_real,
                            &max_k_real,
                            &min_l_real,
                            &max_l_real
                            );
  adjustArrayBoundaries(&vars->E_Fc,
                        &vars->k_min_values_fc,
                        &vars->k_max_values_fc,
                        &vars->l_min_values_fc,
                        &vars->l_max_values_fc,
                        min_k_real,
                        max_k_real,
                        min_l_real,
                        max_l_real
                        );
}
/*
 * Shrink a distance-class energy matrix to the (k, l) ranges that were
 * actually populated (the *_post posterior boundaries).
 *
 * The matrix uses offset pointers: array[k][l/2] is valid for
 * k in [*k_min, *k_max] and l in [l_min[k], l_max[k]] (l stepping by 2),
 * so every free()/realloc() must first undo the offset.
 *
 * On exit *k_min/*k_max/*l_min/*l_max describe the new bounds; the
 * l_min_post/l_max_post arrays are consumed (freed) here.
 * k_min_post == INF signals "nothing was stored" and frees everything,
 * leaving *array == NULL.
 */
PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post){
  int cnt1;
  int k_diff_pre = k_min_post - *k_min;
  int mem_size = k_max_post - k_min_post + 1;
  if(k_min_post < INF){
    /* free all the unused memory behind actual data */
    for(cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++){
      (*array)[cnt1] += (*l_min)[cnt1]/2;  /* undo l offset before free */
      free((*array)[cnt1]);
    }
    /* free unused memory before actual data */
    for(cnt1 = *k_min; cnt1 < k_min_post; cnt1++){
      (*array)[cnt1] += (*l_min)[cnt1]/2;
      free((*array)[cnt1]);
    }
    /* move data to front and thereby eliminating unused memory in front of actual data */
    if(k_diff_pre > 0){
      memmove((int **)(*array),((int **)(*array)) + k_diff_pre, sizeof(int *) * mem_size);
      memmove((int *) (*l_min),((int *) (*l_min)) + k_diff_pre, sizeof(int) * mem_size);
      memmove((int *) (*l_max),((int *) (*l_max)) + k_diff_pre, sizeof(int) * mem_size);
    }
    /* reallocating memory to actual size used */
    *array += *k_min;  /* back to the true allocation base before realloc */
    *array = (int **)realloc(*array, sizeof(int *) * mem_size);
    *array -= k_min_post;  /* re-offset for the new k range */
    *l_min += *k_min;
    *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size);
    *l_min -= k_min_post;
    *l_max += *k_min;
    *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size);
    *l_max -= k_min_post;
    /* adjust l dimension of array */
    for(cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++){
      if(l_min_post[cnt1] < INF){
        /* new memsize */
        mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1)/2 + 1;
        /* reshift the pointer */
        (*array)[cnt1] += (*l_min)[cnt1]/2;
        /* rows store only every second l; account for parity mismatch */
        int shift = (l_min_post[cnt1]%2 == (*l_min)[cnt1]%2) ? 0 : 1;
        /* eliminate unused memory in front of actual data */
        unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1])/2 + shift;
        if(start > 0)
          memmove((int *)((*array)[cnt1]), (int *)((*array)[cnt1])+start, sizeof(int) * mem_size);
        (*array)[cnt1] = (int *) realloc((*array)[cnt1], sizeof(int) * mem_size);
        (*array)[cnt1] -= l_min_post[cnt1]/2;
      }
      else{
        /* free according memory — no l class was populated for this k */
        (*array)[cnt1] += (*l_min)[cnt1]/2;
        free((*array)[cnt1]);
      }
      (*l_min)[cnt1] = l_min_post[cnt1];
      (*l_max)[cnt1] = l_max_post[cnt1];
    }
  }
  else{
    /* we have to free all unused memory */
    for(cnt1 = *k_min; cnt1 <= *k_max; cnt1++){
      (*array)[cnt1] += (*l_min)[cnt1]/2;
      free((*array)[cnt1]);
    }
    (*l_min) += *k_min;
    (*l_max) += *k_min;
    free(*l_min);
    free(*l_max);
    (*array) += *k_min;
    free(*array);
    *array = NULL;
  }
  /* the posterior arrays were allocated with the old *k_min offset — undo it and release them */
  l_min_post += *k_min;
  l_max_post += *k_min;
  free(l_min_post);
  free(l_max_post);
  *k_min = k_min_post;
  *k_max = k_max_post;
}
/*
 * Allocate and reset the posterior boundary trackers for a (k, l)
 * distance-class matrix. min_k/max_k start at INF/0 so the first call to
 * updatePosteriorBoundaries() establishes real bounds; the per-k arrays
 * are offset by `shift` so they can be indexed with k directly.
 */
INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l){
  int idx;
  int *lo = (int *)space(sizeof(int) * size);
  int *hi = (int *)space(sizeof(int) * size);
  *min_k = INF;
  *max_k = 0;
  *min_l = lo - shift;
  *max_l = hi - shift;
  /* sentinel-initialize every k slot: empty until a value is recorded */
  for(idx = shift; idx < shift + size; idx++){
    (*min_l)[idx] = INF;
    (*max_l)[idx] = 0;
  }
}
/*
 * Record that distance class (d1, d2) received a value: widen the global
 * k bounds and the per-k l bounds accordingly.
 */
INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l){
  if(d2 < (*min_l)[d1]) (*min_l)[d1] = d2;
  if(d2 > (*max_l)[d1]) (*max_l)[d1] = d2;
  if(d1 < *min_k) *min_k = d1;
  if(d1 > *max_k) *max_k = d1;
}
/*
 * Allocate and initialize the per-k l-boundary arrays for a (k, l)
 * distance-class matrix covering k in [min_k_pre, max_k_pre].
 *
 * For each k the minimum feasible l is raised so that k + l >= bpdist
 * (triangle inequality on base-pair distances) and so that k + l has the
 * same parity as bpdist. The arrays are offset by min_k_pre so they can
 * be indexed with k directly.
 */
INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l){
  int k, lo;
  int span = max_k_pre - min_k_pre + 1;
  *min_k = min_k_pre;
  *max_k = max_k_pre;
  *min_l = ((int *) space(sizeof(int) * span)) - min_k_pre;
  *max_l = ((int *) space(sizeof(int) * span)) - min_k_pre;
  for(k = min_k_pre; k <= max_k_pre; k++){
    /* raise l until k + l reaches bpdist (same effect as incrementing) */
    lo = min_l_pre;
    if(lo + k < bpdist) lo = bpdist - k;
    /* match the parity of bpdist */
    if((bpdist % 2) != ((lo + k) % 2)) lo++;
    (*min_l)[k] = lo;
    (*max_l)[k] = max_l_pre;
  }
}
/*
 * Allocate a (k, l) distance-class energy matrix with all entries set to
 * INF. Row k stores only every second l value ((max_l-min_l+1)/2 + 1
 * cells) and both dimensions are pointer-offset so the matrix is indexed
 * as (*array)[k][l/2] for k in [min_k, max_k], l in [min_l[k], max_l[k]].
 */
INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l){
  int k, c, cells;
  int *row;
  *array = ((int **)space(sizeof(int *) * (max_k - min_k + 1))) - min_k;
  for(k = min_k; k <= max_k; k++){
    cells = (max_l[k] - min_l[k] + 1)/2 + 1;
    row = (int *)space(sizeof(int) * cells);
    for(c = 0; c < cells; c++)
      row[c] = INF;
    /* offset so the row is addressed by l/2 directly */
    (*array)[k] = row - min_l[k]/2;
  }
}
/*
 * Allocate a (k, l) distance-class counter matrix of unsigned long,
 * laid out exactly like prepareArray()'s energy matrix (row k holds
 * every second l, both dimensions pointer-offset). Entries keep the
 * zero value provided by space().
 */
INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l){
  int k, cells;
  unsigned long **rows;
  rows = (unsigned long **)space(sizeof(unsigned long *) * (max_k - min_k + 1));
  *array = rows - min_k;
  for(k = min_k; k <= max_k; k++){
    cells = (max_l[k] - min_l[k] + 1)/2 + 1;
    (*array)[k] = ((unsigned long *)space(sizeof(unsigned long) * cells)) - min_l[k]/2;
  }
}
|
GB_binop__ge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp64)
// A*D function (colscale): GB (_AxD__ge_fp64)
// D*A function (rowscale): GB (_DxB__ge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp64)
// C=scalar+B GB (_bind1st__ge_fp64)
// C=scalar+B' GB (_bind1st_tran__ge_fp64)
// C=A+scalar GB (_bind2nd__ge_fp64)
// C=A'+scalar GB (_bind2nd_tran__ge_fp64)
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The actual loop
// lives in the included template; this wrapper only supplies the GE_FP64
// type/operator macros. Returns GrB_NO_VALUE when the operator is disabled
// via GB_control.h (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. For this
// comparison operator the template is compiled out (#if 0) — presumably
// because GE is not a valid accumulator here — so the function is a no-op
// that reports success (or GrB_NO_VALUE when GB_DISABLE).
GrB_Info GB (_Cdense_accumB__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. Like the accumB variant
// above, the body is compiled out (#if 0) for this operator, leaving a
// success-returning stub (or GrB_NO_VALUE when GB_DISABLE).
GrB_Info GB (_Cdense_accumb__ge_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying
// cij = (aij >= djj). Cx is typed bool (the GE result); the traversal is
// provided by the colscale meta template.
GrB_Info GB (_AxD__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying
// cij = (dii >= bij). Mirrors GB(_AxD__ge_fp64) but uses the rowscale
// meta template.
GrB_Info GB (_DxB__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, with
// cij = (aij >= bij) where both entries are present. The slicing
// workspaces are declared here and released by GB_FREE_WORK after the
// add template runs.
GrB_Info GB (_AaddB__ge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (set intersection of patterns) with the GE_FP64
// operator, optionally masked.  Method 01; the computation is entirely in
// the included meta template.
GrB_Info GB (_AemultB_01__ge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A is sparse/hypersparse and B is bitmap/full.
// The template is included once or twice depending on whether the operator
// needs an explicit flipped form (GB_BINOP_FLIP is set elsewhere in this
// generated file).
GrB_Info GB (_AemultB_02__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,              // if true, compute cij = f(bij,aij)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full.  All work is in the included template.
GrB_Info GB (_AemultB_03__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap: C=A.*B, C<M>=A.*B, or
// C<!M>=A.*B.  All work is in the included template.
GrB_Info GB (_AemultB_bitmap__ge_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry p present in B: the scalar x is
// bound as the first operand of the GE_FP64 comparator.  Bb is B->b when B
// is bitmap (GBB skips absent entries); Cx and Bx may alias safely because
// each slot is read before it is written.
GrB_Info GB (_bind1st__ge_fp64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped buffers with their concrete types
    bool *result = (bool *) Cx_output ;
    const double *bvals = (const double *) Bx_input ;
    double xscalar = (*((double *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            double yk = bvals [k] ;
            result [k] = (xscalar >= yk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry p present in A: the scalar y is
// bound as the second operand of the GE_FP64 comparator.  Ab is A->b when A
// is bitmap (GBB skips absent entries); Cx and Ax may alias safely because
// each slot is read before it is written.
GrB_Info GB (_bind2nd__ge_fp64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped buffers with their concrete types
    bool *result = (bool *) Cx_output ;
    const double *avals = (const double *) Ax_input ;
    double yscalar = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            double xk = avals [k] ;
            result [k] = (xk >= yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template below: cij = (x >= aij),
// with the scalar x bound as the first operand (no typecasting, in spite of
// the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A while applying the GE comparator with the
// bound scalar x.  All traversal logic is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__ge_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this generated file
    // (here both types are fp64, so the definition is unchanged)
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template below: cij = (aij >= y),
// with the scalar y bound as the second operand (no typecasting, in spite
// of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A while applying the GE comparator with the
// bound scalar y.  All traversal logic is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sum_openmp.c | /*
Copyright (C) 2018 Francesc Alted
http://blosc.org
License: BSD 3-Clause (see LICENSE.txt)
Example program showing how to operate with compressed buffers.
To compile this program for synthetic data (default):
$ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2
To run:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 199950000000
Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s
Compression ratio: 762.9 MB -> 14.0 MB (54.6x)
Compression time: 0.288 s, 2653.5 MB/s
Sum for *compressed* data: 199950000000
Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s
To use real (rainfall) data:
$ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp
And running it:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 29741012
Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s
Compression ratio: 381.5 MB -> 71.3 MB (5.3x)
Compression time: 1.53 s, 249.1 MB/s
Sum for *compressed* data: 29741012
Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <errno.h>
#include <assert.h>
#include "blosc2.h"
#define KB 1024.
#define MB (1024*KB)
#define GB (1024*MB)
#define N (100 * 1000 * 1000)
#define CHUNKSIZE (16 * 1000)
#define NCHUNKS (N / CHUNKSIZE)
#define NTHREADS 8
#define NITER 5
#ifdef RAINFALL
#define SYNTHETIC false
#else
#define SYNTHETIC true
#endif
#if SYNTHETIC == true
#define DTYPE int64_t
#define CLEVEL 3
#define CODEC BLOSC_BLOSCLZ
#else
#define DTYPE float
#define CLEVEL 1
#define CODEC BLOSC_LZ4HC
#endif
// Benchmark: sum a large dataset twice — once directly from memory, once by
// decompressing a Blosc2 super-chunk on the fly — and report the best-of-NITER
// throughput for each path.  Returns 0 on success; exits 1 on input errors.
int main() {
  static DTYPE udata[N];           // dataset is too large for the stack
  DTYPE chunk_buf[CHUNKSIZE];      // staging buffer for one chunk
  size_t isize = CHUNKSIZE * sizeof(DTYPE);
  DTYPE sum, compressed_sum;
  int64_t nbytes, cbytes;
  blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS;
  blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;
  blosc2_schunk* schunk;
  int i, j, nchunk;
  blosc_timestamp_t last, current;
  double ttotal, itotal;

  printf("Blosc version info: %s (%s)\n",
         BLOSC_VERSION_STRING, BLOSC_VERSION_DATE);

  // Fill the buffer for a chunk: synthetic ramp, or the first CHUNKSIZE
  // items of the (Blosc-compressed) rainfall grid file.
  if (SYNTHETIC) {
    for (j = 0; j < CHUNKSIZE; j++) {
      chunk_buf[j] = j;
    }
  }
  else {
    struct stat info;
    const char *filegrid = "rainfall-grid-150x150.bin";
    if (stat(filegrid, &info) != 0) {
      printf("Grid file %s not found!", filegrid);
      exit(1);
    }
    char *cdata = malloc(info.st_size);
    FILE *f = fopen(filegrid, "rb");
    size_t blocks_read = fread(cdata, info.st_size, 1, f);
    assert(blocks_read == 1);
    fclose(f);
    int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf);
    if (dsize < 0) {
      printf("blosc_getitem() error. Error code: %d\n. Probaly reading too much data?", dsize);
      exit(1);
    }
    free(cdata);
  }

  // Fill the uncompressed dataset by repeating the chunk
  for (i = 0; i < N / CHUNKSIZE; i++) {
    for (j = 0; j < CHUNKSIZE; j++) {
      udata[i * CHUNKSIZE + j] = chunk_buf[j];
    }
  }

  // Reduce uncompressed dataset; keep the best (minimum) time over NITER runs
  ttotal = 1e10;
  sum = 0;
  for (int n = 0; n < NITER; n++) {
    sum = 0;
    blosc_set_timestamp(&last);
#pragma omp parallel for reduction (+:sum)
    for (i = 0; i < N; i++) {
      sum += udata[i];
    }
    blosc_set_timestamp(&current);  // FIX: was mojibake "¤t" for "&current"
    itotal = blosc_elapsed_secs(last, current);
    if (itotal < ttotal) ttotal = itotal;
  }
  printf("Sum for uncompressed data: %10.0f\n", (double)sum);
  printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n",
         ttotal, (isize * NCHUNKS) / (ttotal * (double)MB));

  // Create a super-chunk container for the compressed container
  cparams.typesize = sizeof(DTYPE);
  cparams.compcode = CODEC;
  cparams.clevel = CLEVEL;
  cparams.nthreads = 1;
  dparams.nthreads = 1;
  blosc_set_timestamp(&last);
  schunk = blosc2_new_schunk(cparams, dparams, NULL);
  for (nchunk = 0; nchunk < NCHUNKS; nchunk++) {
    for (i = 0; i < CHUNKSIZE; i++) {
      chunk_buf[i] = udata[i + nchunk * CHUNKSIZE];
    }
    blosc2_schunk_append_buffer(schunk, chunk_buf, isize);
  }
  blosc_set_timestamp(&current);  // FIX: mojibake, see above
  ttotal = blosc_elapsed_secs(last, current);
  nbytes = schunk->nbytes;
  cbytes = schunk->cbytes;
  printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n",
         nbytes / MB, cbytes / MB, (1. * nbytes) / cbytes);
  printf("Compression time: %.3g s, %.1f MB/s\n",
         ttotal, nbytes / (ttotal * MB));

  // Thread count: OMP_NUM_THREADS overrides the compiled-in default
  int nthreads = NTHREADS;
  char* envvar = getenv("OMP_NUM_THREADS");
  if (envvar != NULL) {
    long value = strtol(envvar, NULL, 10);
    // FIX: strtol does not return EINVAL; also require value > 0, since
    // nthreads == 0 would divide by zero just below.
    if (value > 0) {
      nthreads = (int)value;
    }
  }

  // Build per-thread buffers and decompression contexts
  int nchunks_thread = NCHUNKS / nthreads;
  int remaining_chunks = NCHUNKS - nchunks_thread * nthreads;
  blosc2_context **dctx = malloc(nthreads * sizeof(void*));
  DTYPE** chunk = malloc(nthreads * sizeof(void*));
  for (j = 0; j < nthreads; j++) {
    chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE));
    // FIX: create each context once; they were re-created (and leaked)
    // on every NITER iteration inside the parallel loop.
    dctx[j] = blosc2_create_dctx(dparams);
  }

  // Reduce the compressed dataset; best (minimum) time over NITER runs
  ttotal = 1e10;
  compressed_sum = 0;
  for (int n = 0; n < NITER; n++) {
    compressed_sum = 0;
    // FIX: restart the timer each iteration (it was started once outside
    // this loop, so later iterations measured cumulative time, unlike the
    // uncompressed path above).
    blosc_set_timestamp(&last);
    // FIX: 'i' is function-scope, so it was shared across threads — a data
    // race; it must be private (nchunk already was).
#pragma omp parallel for private(i, nchunk) reduction (+:compressed_sum)
    for (j = 0; j < nthreads; j++) {
      for (nchunk = 0; nchunk < nchunks_thread; nchunk++) {
        blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk],
                              (void*)(chunk[j]), isize);
        for (i = 0; i < CHUNKSIZE; i++) {
          compressed_sum += chunk[j][i];
        }
      }
    }
    // chunks that did not divide evenly among the threads (done serially)
    for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) {
      blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], (void*)(chunk[0]), isize);
      for (i = 0; i < CHUNKSIZE; i++) {
        compressed_sum += chunk[0][i];
      }
    }
    blosc_set_timestamp(&current);  // FIX: mojibake, see above
    itotal = blosc_elapsed_secs(last, current);
    if (itotal < ttotal) ttotal = itotal;
  }
  printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum);
  printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n",
         ttotal, nbytes / (ttotal * MB));

  if (SYNTHETIC) {
    // difficult to fulfill for single precision
    assert(sum == compressed_sum);
  }

  /* Free resources */
  for (j = 0; j < nthreads; j++) {
    blosc2_free_ctx(dctx[j]);
    free(chunk[j]);
  }
  free(dctx);
  free(chunk);
  blosc2_free_schunk(schunk);
  return 0;
}
|
bellmanFordCompleteGraphCpuParallel.c | #include "bellmanFordCompleteGraphCpuParallel.h"
// Parallel Bellman-Ford over a complete graph stored as an adjacency matrix.
// Relaxes all |V|^2 edges up to |V| times, stopping early once a full pass
// makes no update.  Returns the elapsed wall-clock time in seconds, or -1 if
// the graph or any of its arrays is NULL.
//
// NOTE(review): inside the parallel loop, graph->dist[x] is written while
// other threads may concurrently read graph->dist[y] for y == x, and several
// threads may write `finished` — these are unsynchronized accesses.  This
// looks like an intentional "chaotic relaxation" scheme (distances only ever
// decrease), but it is a data race in the C/OpenMP memory model; confirm this
// is acceptable for the intended use.
double bellmanFordParallelCpu(CompleteGraph *graph, unsigned int startVertex, unsigned int numberOfThreads) {
    // validate the graph and its arrays before touching them
    if (!graph || !graph->adjMatrix || !graph->predecessor || !graph->dist) {
        return -1;
    }
    initArrays(graph->dist, graph->predecessor, graph->size);
    graph->dist[startVertex] = 0;   // source vertex has distance zero
    double starttime, endtime;
    bool finished;
    omp_set_num_threads(numberOfThreads);
    starttime = omp_get_wtime();
    unsigned int y, n;
    // at most |V| relaxation passes (classic Bellman-Ford bound)
    for (n = 0; n < graph->size; n++) {
        finished = true;
        // parallelize over source rows y; x scans all destination columns
        #pragma omp parallel for
        for (y = 0; y < graph->size; y++) {
            for (unsigned int x = 0; x < graph->size; x++) {
                float weight = graph->adjMatrix[y][x];
                if (graph->dist[y] + weight < graph->dist[x]) {
                    // relax edge (y, x)
                    graph->dist[x] = graph->dist[y] + weight;
                    graph->predecessor[x] = y;
                    finished = false;   // at least one update this pass
                }
            }
        }
        // early exit: a pass with no relaxation means distances converged
        if (finished) {
            break;
        }
    }
    endtime = omp_get_wtime();
    return endtime - starttime;   // elapsed seconds, not a distance
}
|
rose_v1_jacobi_seq.c | /* An example code
*
* */
#include <stdio.h>
#include <math.h>
#include <omp.h>
void driver();
void initialize();
void jacobi();
void error_check();
#define MSIZE 200
int n;
int m;
int mits;
double tol;
double relax = 1.0;
double alpha = 0.0543;
double u[200][200];
double f[200][200];
double uold[200][200];
double dx;
double dy;
// Entry point: fixes the problem parameters (grid size, tolerance, iteration
// cap) and runs the Jacobi solver via driver().  The original interactive
// input is kept below for reference.
int main()
{
  // float toler;
  /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
     scanf ("%d",&n);
     scanf ("%d",&m);
     printf("Input tol - error tolerance for iterative solver\n");
     scanf("%f",&toler);
     tol=(double)toler;
     printf("Input mits - Maximum iterations for solver\n");
     scanf("%d",&mits);
  */
  n = 200;
  m = 200;
  tol = 0.0000000001;
  mits = 1000;
  driver();
  // FIX: return 0 on success — returning 1 reports failure to the shell.
  return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialzed.
*
* Working varaibles/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
// Runs the whole experiment: initialize the grids, solve the Helmholtz
// equation with the Jacobi iteration, then measure the solution error
// against the assumed exact solution.
void driver()
{
  initialize();
  /* Solve Helmholtz equation */
  jacobi();
  /* error_check (n,m,alpha,dx,dy,u,f) */
  error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
// Initializes the global grids: u (solution, zeroed) and f (right-hand side)
// on an n-by-m grid spanning [-1,1] x [-1,1], assuming the exact solution
// u(x,y) = (1-x^2)*(1-y^2).  Also sets the global grid spacings dx and dy.
void initialize()
{
  int i;
  int j;
  int xx;
  int yy;
  // double PI = 3.1415926;
  // -->dx@112:2
  dx = 2.0 / (n - 1);
  //-->dy@113:2
  dy = 2.0 / (m - 1);
  /* Initialize initial condition and RHS */
  //#pragma omp parallel for private(i,j,xx,yy)
  #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)
  for (i = 0; i <= n - 1; i += 1) {
    // NOTE(review): the inner pragma requests nested parallelism, which is
    // disabled by default in OpenMP, so it is normally a no-op.
    #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)
    for (j = 0; j <= m - 1; j += 1) {
      /* -1 < x < 1 */
      // NOTE(review): the (int) casts truncate the coordinates toward zero,
      // and (i - 1) puts the first point below -1 — presumably inherited
      // from the original benchmark; confirm against the reference version.
      xx = ((int )(- 1.0 + dx * (i - 1)));
      /* -1 < y < 1 */
      yy = ((int )(- 1.0 + dy * (j - 1)));
      u[i][j] = 0.0;
      f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));
    }
  }
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi()
{
double omega;
int i;
int j;
int k;
double error;
double resid;
double ax;
double ay;
double b;
omega = relax;
/*
* Initialize coefficients */
/* X-direction coef */
ax = 1.0 / (dx * dx);
/* Y-direction coef */
ay = 1.0 / (dy * dy);
/* Central coeff */
b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;
error = 10.0 * tol;
k = 1;
while(k <= mits && error > tol){
error = 0.0;
/* Copy new solution into old */
//#pragma omp parallel
{
//#pragma omp for private(i,j)
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= m - 1; j += 1) {
uold[i][j] = u[i][j];
}
}
//#pragma omp for private(i,j,resid) reduction(+:error) nowait
for (i = 1; i <= n - 1 - 1; i += 1) {
for (j = 1; j <= m - 1 - 1; j += 1) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
}
}
/* omp end parallel */
/* Error check */
// k = k + 1;
error = sqrt(error) / (n * m);
/* End iteration loop */
}
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n",error);
}
// Computes the RMS-style error of the computed solution u against the
// assumed exact solution (1-x^2)*(1-y^2) over the whole grid, and prints it.
// Recomputes the global spacings dx and dy from the global n and m.
void error_check()
{
  int i;
  int j;
  double xx;
  double yy;
  double temp;
  double error;
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);
  error = 0.0;
  //#pragma omp parallel for private(i,j,xx,yy,temp) reduction(+:error)
  #pragma omp parallel for private (xx,yy,temp,i,j) reduction (+:error)
  for (i = 0; i <= n - 1; i += 1) {
    // NOTE(review): nested parallel pragma — a no-op unless nested
    // parallelism is enabled.
    #pragma omp parallel for private (xx,yy,temp,j) reduction (+:error) firstprivate (dx,dy)
    for (j = 0; j <= m - 1; j += 1) {
      // NOTE(review): (i - 1) places the first sample below -1, mirroring
      // the same offset used in initialize(); confirm against the
      // reference benchmark.
      xx = - 1.0 + dx * (i - 1);
      yy = - 1.0 + dy * (j - 1);
      temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy);
      error = error + temp * temp;
    }
  }
  error = sqrt(error) / (n * m);
  printf("Solution Error :%E \n",error);
}
|
GB_unop__cos_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cos_fc64_fc64
// op(A') function: GB_unop_tran__cos_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ccos (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccos (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = ccos (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ccos (Ax [p]) for all anz entries: apply the complex cosine to a
// double-complex array.  When Ab (the bitmap of A) is non-NULL, only slots
// with Ab [p] set are computed; C->b has already been copied from A->b by
// the caller.
GrB_Info GB_unop_apply__cos_fc64_fc64
(
    GxB_FC64_t *Cx, // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every slot holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op: a plain parallel memcpy suffices (not taken for ccos)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;        // cast is a no-op (same type)
            Cx [p] = ccos (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots with no entry
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;        // cast is a no-op (same type)
            Cx [p] = ccos (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ccos (A'): transpose A and apply the complex cosine in one pass.
// The traversal and the per-entry GB_CAST_OP (defined earlier in this
// generated file) are supplied by the included template.
GrB_Info GB_unop_tran__cos_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ast-dump-openmp-begin-declare-variant_11.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=c_mode -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=cxx_mode -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// c_mode-no-diagnostics
#ifdef __cplusplus
#define CONST constexpr
#else
#define CONST __attribute__((const))
#endif
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
CONST int also_after1(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
static int also_after2(void) {
return 0;
}
__attribute__((nothrow)) int also_after3(void) {
return 0;
}
static CONST __attribute__((nothrow, always_inline)) __inline__ int also_after4(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
#pragma omp end declare variant
int also_after1(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after1' follows constexpr declaration}}
return 1;
}
int also_after2(void) {
return 2;
}
int also_after3(void) {
return 3;
}
int also_after4(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after4' follows constexpr declaration}}
return 4;
}
int main() {
// Should return 0.
return also_after1() + also_after2() + also_after3() + also_after4();
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used also_after1 'int ({{.*}})'
// C-NEXT: | |-ConstAttr [[ADDR_1:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_2:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' Function [[ADDR_4:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_4]] <col:15, line:15:1> line:9:15 also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <line:13:29, line:15:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-ConstAttr [[ADDR_8:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_10:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_12:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_12]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// C-NEXT: | `-CompoundStmt [[ADDR_13:0x[a-z0-9]*]] <col:30, line:18:1>
// C-NEXT: | `-ReturnStmt [[ADDR_14:0x[a-z0-9]*]] <line:17:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_15:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: |-FunctionDecl [[ADDR_16:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}})'
// C-NEXT: | |-NoThrowAttr [[ADDR_17:0x[a-z0-9]*]] <col:16>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_20:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_20]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:48, line:21:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:20:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-NoThrowAttr [[ADDR_24:0x[a-z0-9]*]] <line:19:16>
// C-NEXT: |-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used also_after4 'int ({{.*}})' static inline
// C-NEXT: | |-ConstAttr [[ADDR_26:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_27:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_28:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_31:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_31]] <col:1, line:24:1> line:22:1 also_after4[implementation={vendor(llvm)}] 'int ({{.*}})' static inline
// C-NEXT: | |-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:87, line:24:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:23:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | |-ConstAttr [[ADDR_35:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_36:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | `-AlwaysInlineAttr [[ADDR_37:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: |-FunctionDecl [[ADDR_38:0x[a-z0-9]*]] prev [[ADDR_0]] <line:27:1, line:29:1> line:27:5 used also_after1 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:23, line:29:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:28:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: | |-ConstAttr [[ADDR_42:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_43:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_44:0x[a-z0-9]*]] prev [[ADDR_9]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_45:0x[a-z0-9]*]] <col:23, line:32:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:31:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_47:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_48:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11]] <line:16:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_49:0x[a-z0-9]*]] prev [[ADDR_16]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_50:0x[a-z0-9]*]] <col:23, line:35:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_51:0x[a-z0-9]*]] <line:34:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int' 3
// C-NEXT: | |-NoThrowAttr [[ADDR_53:0x[a-z0-9]*]] <line:19:16> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_54:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_55:0x[a-z0-9]*]] prev [[ADDR_25]] <line:36:1, line:38:1> line:36:5 used also_after4 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_56:0x[a-z0-9]*]] <col:23, line:38:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_57:0x[a-z0-9]*]] <line:37:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_58:0x[a-z0-9]*]] <col:10> 'int' 4
// C-NEXT: | |-ConstAttr [[ADDR_59:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | |-NoThrowAttr [[ADDR_60:0x[a-z0-9]*]] <line:22:29> Inherited
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_61:0x[a-z0-9]*]] <col:38> Inherited always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-FunctionDecl [[ADDR_63:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_64:0x[a-z0-9]*]] <col:12, line:44:1>
// C-NEXT: `-ReturnStmt [[ADDR_65:0x[a-z0-9]*]] <line:43:3, col:70>
// C-NEXT: `-BinaryOperator [[ADDR_66:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_67:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// C-NEXT: | |-BinaryOperator [[ADDR_68:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// C-NEXT: | | |-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' Function [[ADDR_38]] 'also_after1' 'int ({{.*}})'
// C-NEXT: | | | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:9:15, line:43:22> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:9:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | | `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// C-NEXT: | | |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:26, col:38> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' Function [[ADDR_44]] 'also_after2' 'int ({{.*}})'
// C-NEXT: | | `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_11]] <col:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | `-PseudoObjectExpr [[ADDR_81:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// C-NEXT: | |-CallExpr [[ADDR_82:0x[a-z0-9]*]] <col:42, col:54> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_83:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_84:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' Function [[ADDR_49]] 'also_after3' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_85:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_86:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-PseudoObjectExpr [[ADDR_87:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// C-NEXT: |-CallExpr [[ADDR_88:0x[a-z0-9]*]] <col:58, col:70> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_89:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_90:0x[a-z0-9]*]] <col:58> 'int ({{.*}})' Function [[ADDR_55]] 'also_after4' 'int ({{.*}})'
// C-NEXT: `-CallExpr [[ADDR_91:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_92:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used constexpr also_after1 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_3]] <col:15, line:15:1> line:7:15 constexpr also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <line:13:29, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:30, line:18:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:17:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_15:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_17]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:48, line:21:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:20:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used constexpr also_after4 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_22:0x[a-z0-9]*]] <col:38> always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_23:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_25]] <col:1, line:24:1> line:22:1 constexpr also_after4[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:87, line:24:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:23:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-AlwaysInlineAttr [[ADDR_29:0x[a-z0-9]*]] <line:22:38> always_inline
// CXX-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:27:1, line:29:1> line:27:5 invalid also_after1 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:23, line:29:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:28:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_33:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_34:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_35:0x[a-z0-9]*]] prev [[ADDR_7]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_36:0x[a-z0-9]*]] <col:23, line:32:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_37:0x[a-z0-9]*]] <line:31:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_38:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_39:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:16:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] prev [[ADDR_14]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:23, line:35:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <line:34:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:10> 'int' 3
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <line:19:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_45:0x[a-z0-9]*]] <line:36:1, line:38:1> line:36:5 invalid also_after4 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:23, line:38:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:37:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:10> 'int' 4
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_49:0x[a-z0-9]*]] <line:22:38> Inherited always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-FunctionDecl [[ADDR_51:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_52:0x[a-z0-9]*]] <col:12, line:44:1>
// CXX-NEXT: `-ReturnStmt [[ADDR_53:0x[a-z0-9]*]] <line:43:3, col:70>
// CXX-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// CXX-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_after1' 'int ({{.*}})'
// CXX-NEXT: | | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:7:15, line:43:22> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:7:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_2]] <col:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// CXX-NEXT: | | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_35]] 'also_after2' 'int ({{.*}})'
// CXX-NEXT: | | `-CallExpr [[ADDR_67:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | `-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// CXX-NEXT: | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:42, col:54> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'also_after3' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// CXX-NEXT: |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:58, col:70> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:58> 'int ({{.*}}) __attribute__((nothrow))' {{.*}}Function [[ADDR_21]] 'also_after4' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
|
octree_mex.c | /*
Copyright (c) 2012 by Marcin Krotkiewski, University of Oslo
See ../License.txt for License Agreement.
*/
/* libutils headers */
#include <libutils/config.h>
#include <libutils/utils.h>
#include <libutils/parallel.h>
#include <libmatlab/mesh.h>
#include <libmatlab/mexparams.h>
/* system headers */
#include <stdlib.h>
#include <stdio.h>
#include "octree_opts.h"
/* maximum integer value that can be stored in a double */
/* such that all smaller integer values can also be stored in a double */
#define MAX_TREE_DEPTH MIN(53, (sizeof(Uint)*CHAR_BIT))
#define ROOT_DEPTH (MAX_TREE_DEPTH-1)
#define MAX_VAL (double)(MaxUint ^ (MaxUint<<ROOT_DEPTH))
#define EMPTY_ELID ((dimType)-1)
#ifndef NDIM
#define NDIM 3
#endif
/* different string constants for quadtrees and octrees */
#if NDIM==3
#define QTREE_STR_ID "oTreeMP"
#define QTREE_STR "otree"
#define QUADTREE_STR "octree"
#define QUADRANT_STR "octant"
#else
#define QTREE_STR_ID "qTreeMP"
#define QTREE_STR "qtree"
#define QUADTREE_STR "quadtree"
#define QUADRANT_STR "quadrant"
#endif
Double MACHEPS;
Int vtk_write2d(char *model_name, dimType *elems, Double *nodes, dimType *celldata,
dimType nnod, dimType nel, dimType nnodel);
Int vtk_write3d(char *model_name, dimType *elems, Double *nodes, dimType *celldata,
dimType nnod, dimType nel, dimType nnodel);
#if NDIM==2
#define NCHILDREN 4
typedef struct {
Double x, y;
} t_node_coords;
#else
#define NCHILDREN 8
typedef struct {
Double x, y, z;
} t_node_coords;
#endif
/* quadrant structure */
#define EMPTY_QUADRANT ((dimType)-1)
typedef struct _t_quadrant t_quadrant;
/* a single quadrant/octant; stored contiguously in a memory pool */
struct _t_quadrant {
Uint x_code; /* quantized (integer) normalized X coordinate of the quadrant origin */
Uint y_code;
#if NDIM==3
Uint z_code;
#endif
Uint level; /* remaining subdivision levels; root starts at ROOT_DEPTH, leaves count down */
size_t parent; /* BYTE offset of the parent within the pool (pool may be realloc'd, so no raw pointers) */
size_t children[NCHILDREN]; /* byte offsets of children; children[0]==EMPTY_QUADRANT marks a leaf */
dimType n_points; /* how many points are in the quadrant */
dimType point_id[]; /* point id of the points in quadrant. */
/* this thing is actually an array of n_leaf_points point ids */
};
/* quadtree structure */
/* serialized tree header; lives at the start of the pool memory block */
typedef struct {
char name[8]; /* magic id string (QTREE_STR_ID), validated by mex2qtree */
dimType n_leaves;
dimType n_quadrants;
dimType n_leaf_points; /* capacity of each leaf's point_id[] array */
size_t quadrant_size; /* runtime size of one t_quadrant incl. flexible point_id[] */
dimType n_points; /* number of points actually stored in the tree */
Double xmin, xmax, iextentx; /* domain extent; iextentx = 1/(xmax-xmin) for normalization */
Double ymin, ymax, iextenty;
#if NDIM==3
Double zmin, zmax, iextentz;
#endif
} t_quadtree;
size_t header_size = sizeof(t_quadtree);
/* memory pool structure */
typedef struct {
/* complete memory allocated, including the header */
/* and the subsequent list of quadrants pointed to by base_ptr */
char *head_ptr;
/* base_ptr is the pointer to the quadrant array */
/* We can not use t_quadrant since the type definition is incomplete, */
/* therefore pointer arithmetic on t_quadrant* is not defined */
char *base_ptr;
/* quadrant_size (size of the t_quadrant structure) */
/* depends on the n_leaf_points specified at runtime */
size_t quadrant_size;
size_t size; /* pool capacity, in quadrants */
size_t realloc_size; /* growth increment, in quadrants */
dimType ptr; /* index of the next free quadrant slot */
} t_mempool;
#define EMPTY_MEMPOOL_STRUCT {NULL,NULL,0,0,0,0}
/* global variables */
/* search statistics */
static dimType n_leaves = 1;
static Ulong n_elems_searched = 0;
static Double avg_elems_searched = 0;
static Ulong n_max_elems_searched = 0;
/* lists of elements to be searched */
/* while looking for the element containing a marker */
static Uint nlists = 0;
static dimType *slist[1024] = {0};
static size_t *slist_size[1024] = {0};
static Uint initialized = 0;
/* free list structure */
void quadtree_mex_cleanup(void) {
Uint i;
for(i=0; i<nlists; i++) {
if(slist[i]) {
mfree(slist[i], sizeof(dimType)*slist_size[i][0]);
}
}
}
/* compute the quadrant pointer from the base memory pool address */
/* and quadrant offset */
#define CHILD_POINTER(node, n, mempool) \
(t_quadrant*)(mempool->base_ptr + node->children[n])
/* allocate and initialize new leaf quadrant */
/* reallocate memory pool if necessary */
/* Create child n of *dest as a new, empty leaf quadrant taken from the
 * memory pool, growing the pool if it is full. Because the pool may be
 * reallocated (and move), the caller's quadrant pointer *dest is passed
 * by reference and refreshed here. */
#if NDIM==3
STATIC INLINE void create_child(t_quadrant **dest, Uint n, Uint _x_code, Uint _y_code, Uint _z_code, t_mempool *mempool)
#else
STATIC INLINE void create_child(t_quadrant **dest, Uint n, Uint _x_code, Uint _y_code, t_mempool *mempool)
#endif
{
t_quadrant *child = NULL;
/* remember the parent as a pool byte offset - realloc below may move the pool */
size_t offset = (char*)dest[0] - mempool->base_ptr;
if(mempool->ptr == mempool->size){
/* pool exhausted: grow by realloc_size quadrants */
mempool->size += mempool->realloc_size;
mrealloc(mempool->head_ptr,
header_size + mempool->size*mempool->quadrant_size,
mempool->realloc_size*mempool->quadrant_size);
/* head_ptr may have moved: refresh base_ptr and the caller's pointer */
mempool->base_ptr = mempool->head_ptr + header_size;
dest[0] = (t_quadrant*)(mempool->base_ptr + offset);
}
/* children[] stores byte offsets into the pool, not pointers */
dest[0]->children[n] = (size_t)mempool->ptr*mempool->quadrant_size;
mempool->ptr++;
child = CHILD_POINTER(dest[0], n, mempool);
child->x_code = _x_code;
child->y_code = _y_code;
#if NDIM==3
child->z_code = _z_code;
#endif
child->level = dest[0]->level-1;
child->parent = offset;
/* mark the new quadrant as an empty leaf */
child->children[0]= (size_t)EMPTY_QUADRANT;
child->n_points = 0;
child->point_id[0]= EMPTY_ELID;
}
/* Descend from quadrant 'dest' to the leaf containing the normalized
 * point 'coords' (components expected in [0,1]). Returns NULL when the
 * point lies outside the extent of 'dest'; otherwise returns the leaf
 * reached by following the interleaved coordinate bits (Z-curve order). */
t_quadrant *quadtree_locate_codes(t_quadrant *dest, t_node_coords coords, t_mempool *mempool)
{
/* node coordinates out of bounds - point outside of the quadrant */
/* reconstruct the quadrant origin in normalized coordinates from its codes */
Double x = (Double)dest->x_code/MAX_VAL;
Double y = (Double)dest->y_code/MAX_VAL;
Uint x_code;
Uint y_code;
#if NDIM==3
Double z = (Double)dest->z_code/MAX_VAL;
Uint z_code;
#endif
/* quadrant extent used for the containment test.
 * NOTE(review): 1/(MAX_TREE_DEPTH-level) equals the true extent only for
 * the root (d=1); for deeper quadrants it over-estimates (e.g. 1/3 vs 1/4),
 * which merely loosens the rejection test - confirm this is intentional. */
Double d = 1.0/(Double)(MAX_TREE_DEPTH-dest->level);
Uint level;
Uint bit;
Uint child;
/* check if the node belongs to this quadrant, or any of its children */
if(coords.x<x || coords.x>x+d ||
coords.y<y || coords.y>y+d) return NULL;
/* fix the case where point is located at the domain boundary */
/* (a coordinate of exactly 1.0 would quantize past the last quadrant) */
if(coords.x==1.0) coords.x = coords.x-MACHEPS;
if(coords.y==1.0) coords.y = coords.y-MACHEPS;
x_code = (Uint)(coords.x*MAX_VAL);
y_code = (Uint)(coords.y*MAX_VAL);
/* the same for the Z-dimension */
#if NDIM==3
if(coords.z<z || coords.z>z+d) return NULL;
if(coords.z==1.0) coords.z = coords.z-MACHEPS;
z_code = (Uint)(coords.z*MAX_VAL);
#endif
level = dest->level-1;
bit = (Uint)1 << level;
/* walk down until a leaf is reached; children[0]==EMPTY_QUADRANT marks a leaf */
while((dest)->children[0] != EMPTY_QUADRANT){
/* child index is built from one bit of each coordinate code */
#if NDIM==3
child =
((x_code & bit) !=0) |
((y_code & bit) !=0)<<1 |
((z_code & bit) !=0)<<2 ;
#else
child =
((x_code & bit) !=0) |
((y_code & bit) !=0)<<1;
#endif
dest = CHILD_POINTER(dest, child, mempool);
bit >>= 1;
}
return dest;
}
/* Incrementally build a quadtree from nodes. */
/* Add nodes in sequence, quadtree structure is refined in the process. */
/* Internally the wuadtree structure is built from a normalized domain, */
/* i.e., coordinates from [0,1]. The coordinates of added points */\
/* are normalized as we go. */
/* Insert point 'point_id' (coordinates taken from 'nodes', NDIM doubles per
 * point) into the quadtree rooted at 'dest'. Coordinates are normalized to
 * [0,1] using the extents stored in the tree header. When the target leaf is
 * full it is split into NCHILDREN children, its points are redistributed,
 * and the insertion recurses. n_qtree_points counts successful insertions.
 * NOTE: may reallocate (move) the memory pool via create_child - callers
 * must refresh any raw quadrant pointers afterwards. */
void quadtree_add_node(t_quadrant *dest, Double *nodes, dimType point_id, dimType n_leaf_points,
dimType *n_qtree_points, t_mempool *mempool)
{
t_node_coords coords;
coords.x = nodes[point_id*NDIM+0];
coords.y = nodes[point_id*NDIM+1];
#if NDIM==3
coords.z = nodes[point_id*NDIM+2];
#endif
{
/* normalize coordinates */
t_quadtree *tree = (t_quadtree *)mempool->head_ptr;
coords.x = (coords.x - tree->xmin)*tree->iextentx; //(tree->xmax - tree->xmin);
coords.y = (coords.y - tree->ymin)*tree->iextenty; //(tree->ymax - tree->ymin);
#if NDIM==3
coords.z = (coords.z - tree->zmin)*tree->iextentz; //(tree->zmax - tree->zmin);
#endif
}
/* look for the destination quadrant */
if(dest->children[0] != EMPTY_QUADRANT){
dest = quadtree_locate_codes(dest, coords, mempool);
}
/* nothing to do - point outside of the quadrant domain */
if(!dest) {
#if NDIM==3
USERERROR(QTREE_STR": point outside of domain: (%lf, %lf, %lf)", MUTILS_INVALID_PARAMETER,
nodes[point_id*NDIM+0], nodes[point_id*NDIM+1], nodes[point_id*NDIM+2]);
#else
USERERROR(QTREE_STR": point outside of domain: (%lf, %lf)", MUTILS_INVALID_PARAMETER,
nodes[point_id*NDIM+0], nodes[point_id*NDIM+1]);
#endif
return;
}
/* if the quadrant has free space simply add the node */
if(dest->n_points < n_leaf_points){
dest->point_id[dest->n_points++] = point_id;
(*n_qtree_points)++;
return;
}
/* safequard - quadtree maximum level exceeded */
if(dest->level==0) {
USERWARNING(QTREE_STR": maximum tree level exceeded (too much local refinement).\n Point %"PRI_DIMTYPE" not added to quadtree.",
MUTILS_INTEGER_OVERFLOW, point_id);
return;
}
/* split the leaf (dest) and reassign the nodes to new quadtree leaves */
/* do not clear the node information in the parent */
/* useful when leaves are empty and we want to have */
/* some information about nearby points/elements during search */
{
/* each create_child call may reallocate the pool; create_child refreshes
 * 'dest' through the &dest argument, so the pointer stays valid */
Uint bit = (Uint)1<<(dest->level-1);
#if NDIM==2
create_child((&dest), 0, (dest->x_code)      , (dest->y_code)      , mempool);
create_child((&dest), 1, (dest->x_code) | bit, (dest->y_code)      , mempool);
create_child((&dest), 2, (dest->x_code)      , (dest->y_code) | bit, mempool);
create_child((&dest), 3, (dest->x_code) | bit, (dest->y_code) | bit, mempool);
/* update number of leaves */
n_leaves += 3;
#else
create_child((&dest), 0, (dest->x_code)      , (dest->y_code)      , (dest->z_code)      , mempool);
create_child((&dest), 1, (dest->x_code) | bit, (dest->y_code)      , (dest->z_code)      , mempool);
create_child((&dest), 2, (dest->x_code)      , (dest->y_code) | bit, (dest->z_code)      , mempool);
create_child((&dest), 3, (dest->x_code) | bit, (dest->y_code) | bit, (dest->z_code)      , mempool);
create_child((&dest), 4, (dest->x_code)      , (dest->y_code)      , (dest->z_code) | bit, mempool);
create_child((&dest), 5, (dest->x_code) | bit, (dest->y_code)      , (dest->z_code) | bit, mempool);
create_child((&dest), 6, (dest->x_code)      , (dest->y_code) | bit, (dest->z_code) | bit, mempool);
create_child((&dest), 7, (dest->x_code) | bit, (dest->y_code) | bit, (dest->z_code) | bit, mempool);
/* update number of leaves */
n_leaves += 7;
#endif
}
/* add the old nodes directly to the correct child quadrant */
/* (at most n_leaf_points points can land in one child, so no overflow) */
{
dimType ptid;
t_quadrant *child;
Uint childid;
Uint bit = (Uint)1 << (dest->level-1);
Uint x_code, y_code;
#if NDIM==3
Uint z_code;
#endif
/* NOTE: memory pool might have been reallocated, refresh tree pointer! */
t_quadtree *tree = (t_quadtree *)mempool->head_ptr;
for(ptid=0; ptid<n_leaf_points; ptid++){
coords.x = nodes[(size_t)dest->point_id[ptid]*NDIM+0];
coords.y = nodes[(size_t)dest->point_id[ptid]*NDIM+1];
/* normalize coordinates */
coords.x = (coords.x - tree->xmin)*tree->iextentx; //(tree->xmax - tree->xmin);
coords.y = (coords.y - tree->ymin)*tree->iextenty; //(tree->ymax - tree->ymin);
x_code = (Uint)(coords.x*MAX_VAL);
y_code = (Uint)(coords.y*MAX_VAL);
#if NDIM==3
coords.z = nodes[(size_t)dest->point_id[ptid]*NDIM+2];
coords.z = (coords.z - tree->zmin)*tree->iextentz; //(tree->zmax - tree->zmin);
z_code = (Uint)(coords.z*MAX_VAL);
childid =
((x_code & bit) !=0) |
((y_code & bit) !=0)<<1 |
((z_code & bit) !=0)<<2 ;
#else
childid =
((x_code & bit) !=0) |
((y_code & bit) !=0)<<1;
#endif
child = CHILD_POINTER(dest, childid, mempool);
child->point_id[child->n_points++] = dest->point_id[ptid];
}
}
/* add the new node recursively */
quadtree_add_node(dest, nodes, point_id, n_leaf_points, n_qtree_points, mempool);
}
/* linearize the quadtree - extract leaves in Z-curve ordering */
#ifdef _MSC_VER
#pragma auto_inline(off)
#endif
/* Collect pointers to all leaf quadrants of the subtree rooted at 'dest',
 * appending them to tree_leaves[] in Z-curve (depth-first child) order.
 * *itree_leaves is the running output index, advanced per stored leaf. */
void quadtree_extract_leaves(t_quadrant *dest, t_quadrant **tree_leaves, dimType *itree_leaves, t_mempool *mempool)
{
  Uint c;

  /* leaf reached: record it and stop descending */
  if(dest->children[0]==EMPTY_QUADRANT){
    tree_leaves[(*itree_leaves)++] = dest;
    return;
  }

  /* internal quadrant: recurse into every child in order */
  for(c=0; c<NCHILDREN; c++){
    quadtree_extract_leaves(CHILD_POINTER(dest, c, mempool), tree_leaves, itree_leaves, mempool);
  }
}
/* linearize the quadtree - extract points in Z-curve ordering */
#ifdef _MSC_VER
#pragma auto_inline(off)
#endif
/* Copy the point ids stored in all leaves of the subtree rooted at 'dest'
 * into points[] in Z-curve order, converted to ONE_BASED_INDEX numbering.
 * *points_ptr is the running output offset, advanced per copied point. */
void quadtree_extract_points(t_quadrant *dest, dimType *points, dimType *points_ptr, t_mempool *mempool)
{
  Uint c;
  dimType k;

  /* leaf reached: append its (possibly zero) point ids */
  if(dest->children[0]==EMPTY_QUADRANT){
    for(k=0; k<dest->n_points; k++){
      points[(*points_ptr)+k] = dest->point_id[k]+ONE_BASED_INDEX;
    }
    (*points_ptr) += dest->n_points;
    return;
  }

  /* internal quadrant: recurse into every child in order */
  for(c=0; c<NCHILDREN; c++){
    quadtree_extract_points(CHILD_POINTER(dest, c, mempool), points, points_ptr, mempool);
  }
}
#define ENQUEUE_ERR_MSG "Integer overflow in quadtree_locate_element at memory reallocation."

/* Append neighbor element id (n) to the per-thread search list, doubling the
 * list with an overflow-checked size when full.
 * Fixes vs. previous version:
 *  - the overflow-checked 'size' is now actually passed to mrealloc (it was
 *    computed and then ignored in favor of the unchecked product);
 *  - the trailing backslash after the final '}' is removed - it spliced the
 *    following preprocessor line into this macro definition. */
#define ENQUEUE_NEIGHBOR(n)                                             \
  if((n)!=EMPTY_ELID){                                                  \
    if(thr_slist_nel==thr_slist_size){                                  \
      size_t size;                                                      \
      uint64_t temp;                                                    \
      safemult_u(sizeof(dimType), 2, temp, ENQUEUE_ERR_MSG);            \
      safemult_u(temp, thr_slist_size, temp, ENQUEUE_ERR_MSG);          \
      managed_type_cast(size_t, size, temp, ENQUEUE_ERR_MSG);           \
      mrealloc(slist[thrid], size, sizeof(dimType)*thr_slist_size);     \
      slist_size[thrid][0] *= 2;                                        \
      thr_slist = slist[thrid];                                         \
      thr_slist_size = slist_size[thrid][0];                            \
    }                                                                   \
    thr_slist[thr_slist_nel++] = (n);                                   \
  }
#if NDIM==2
#include "point_in_triangle.c"
#else
#include "point_in_tetrahedron.c"
#endif
/***********************************************************/
/* MATLAB INTERFACE */
/***********************************************************/
mxArray *qtree2mex(t_quadtree *tree, size_t tree_size){
#define n_fieldnames 5
const char *fieldnames[n_fieldnames] = {QTREE_STR, "n_leaves", "n_leaf_points", "n_"QUADRANT_STR"s", "n_points"};
mxArray *outp = mxCreateStructMatrix(1, 1, n_fieldnames, fieldnames);
mxArray *field;
Uint n = 0;
mxClassID class_id;
field = mxCreateNumericMatrix(0, 0, mxUINT8_CLASS,mxREAL);
mxSetData(field, (void*)tree);
mxSetN(field, 1);
mxSetM(field, tree_size);
mxSetField(outp, 0, fieldnames[n++], field);
get_matlab_class(dimType, class_id);
field = mxCreateNumericMatrix(1,1,class_id,mxREAL);
((dimType*)mxGetData(field))[0] = tree->n_leaves;
mxSetField(outp, 0, fieldnames[n++], field);
field = mxCreateNumericMatrix(1,1,class_id,mxREAL);
((dimType*)mxGetData(field))[0] = tree->n_leaf_points;
mxSetField(outp, 0, fieldnames[n++], field);
field = mxCreateNumericMatrix(1,1,class_id,mxREAL);
((dimType*)mxGetData(field))[0] = tree->n_quadrants;
mxSetField(outp, 0, fieldnames[n++], field);
field = mxCreateNumericMatrix(1,1,class_id,mxREAL);
((dimType*)mxGetData(field))[0] = tree->n_points;
mxSetField(outp, 0, fieldnames[n++], field);
return outp;
}
/* Recover the t_quadtree pointer from a MATLAB struct produced by qtree2mex.
 * Errors out (USERERROR) when the input is not a struct, lacks the tree
 * field, or fails the header magic-string check. */
t_quadtree* mex2qtree(const mxArray *qtree_struct){
  t_quadtree *qtree;
  mxArray *field;

  /* the input must be a MATLAB structure */
  if(!mxIsStruct(qtree_struct)){
    USERERROR(QTREE_STR"_struct is not a structure", MUTILS_INVALID_PARAMETER);
  }

  /* locate the field holding the raw quadtree memory */
  field = mxGetField(qtree_struct, 0, QTREE_STR);
  if(field==NULL){
    USERERROR(QTREE_STR"_struct is not a valid "QUADTREE_STR, MUTILS_INVALID_PARAMETER);
  }
  qtree = (t_quadtree*)mxGetData(field);

  /* validate the header magic string; force NUL-termination first */
  qtree->name[7] = 0;
  if(strcmp(qtree->name, QTREE_STR_ID) != 0){
    USERERROR(QTREE_STR"_struct is not a valid "QUADTREE_STR" - invalid header", MUTILS_INVALID_PARAMETER);
  }
  return qtree;
}
/* MEX entry: QUADTREE('create', POINTS, xmin, xmax, ymin, ymax,
 * [zmin, zmax,] [max_points_in_leaf]).
 * Builds the quadtree/octree over POINTS (NDIM x n_points doubles), then
 * copies it into MATLAB-managed persistent memory and returns it wrapped
 * in a struct (see qtree2mex). */
mxArray *mex_quadtree_create(int nargin, const mxArray *pargin[])
{
size_t m, n;
char buff[256];
Double *points = NULL;
dimType n_points;
dimType n_dim;
dimType i;
int arg = 1;
dimType n_leaf_points;
/* domain size */
Double xmin, xmax;
Double ymin, ymax;
#if NDIM==3
Double zmin, zmax;
#endif
t_mempool mempool = {NULL,NULL,0,0,0,0};
size_t initial_size = 0;
t_quadtree *qtree = NULL;
t_quadrant *root = NULL;
dimType n_qtree_points = 0;
size_t pow2m1;
/* one-time registration of the atexit cleanup handler */
if(!initialized){
initialized = 1;
mexAtExit(quadtree_mex_cleanup);
}
#if NDIM==3
if(nargin<8){
USERERROR("Usage: "QUADTREE_STR" = "QUADTREE_STR"('create', POINTS, xmin, xmax, ymin, ymax, zmin, zmax, [max_points_in_leaf])", MUTILS_INVALID_PARAMETER);
}
#else
if(nargin<6){
USERERROR("Usage: "QUADTREE_STR" = "QUADTREE_STR"('create', POINTS, xmin, xmax, ymin, ymax, [max_points_in_leaf])", MUTILS_INVALID_PARAMETER);
}
#endif
/* POINTS */
{
char _buff[10];
sprintf(_buff, "%d", NDIM);
m = NDIM;
n = 0;
points = mex_get_matrix(Double, pargin[arg++], &m, &n, "POINTS", _buff, "number of points", 0);
}
SNPRINTF(buff, 255, "No dimensions of 'POINTS' can be larger than %"PRI_DIMTYPE, MaxDimType);
managed_type_cast(dimType, n_dim, m, buff);
managed_type_cast(dimType, n_points, n, buff);
/* domain extents */
m = 1;
xmin = mex_get_matrix(Double, pargin[arg++], &m, &m, "xmin", "1", "1", 0)[0];
xmax = mex_get_matrix(Double, pargin[arg++], &m, &m, "xmax", "1", "1", 0)[0];
ymin = mex_get_matrix(Double, pargin[arg++], &m, &m, "ymin", "1", "1", 0)[0];
ymax = mex_get_matrix(Double, pargin[arg++], &m, &m, "ymax", "1", "1", 0)[0];
#if NDIM==3
zmin = mex_get_matrix(Double, pargin[arg++], &m, &m, "zmin", "1", "1", 0)[0];
zmax = mex_get_matrix(Double, pargin[arg++], &m, &m, "zmax", "1", "1", 0)[0];
#endif
/* maximum number of points in quadrant */
if(nargin>arg){
/* NOTE(review): arg is advanced twice here (once inside the call, once
 * below) - harmless since arg is not used afterwards, but suspicious */
n_leaf_points = mex_get_integer_scalar(dimType, pargin[arg++], "max_points_in_leaf", 0, 0);
arg++;
} else {
n_leaf_points = 1;
}
/* clamp leaf capacity to [1, n_points] */
if(n_leaf_points>n_points) n_leaf_points = n_points;
if(n_leaf_points<1) n_leaf_points = 1;
tic();
/* setup the memory pool */
/* Allocate roughly the correct amount of memory */
/* for the case when points are spread uniformly in space. */
pow2m1 = pow2m1_roundup(n_leaf_points);
initial_size = (size_t)n_points*2/(pow2m1+1);
mempool.size = initial_size;
/* NOTE(review): for very small n_points, initial_size (and hence
 * realloc_size) can be 0 - confirm create_child's growth path copes */
mempool.realloc_size = mempool.size/2;
mempool.ptr = 1;
/* quadrant size includes the flexible point_id[] array of n_leaf_points ids */
mempool.quadrant_size = sizeof(t_quadrant) + sizeof(dimType)*n_leaf_points;
mcalloc(mempool.head_ptr, header_size + mempool.size*mempool.quadrant_size);
mempool.base_ptr = mempool.head_ptr + header_size;
/* set real domain dimensions for coordinate normalization */
qtree = (t_quadtree*)mempool.head_ptr;
qtree->xmin = xmin;
qtree->xmax = xmax;
qtree->ymin = ymin;
qtree->ymax = ymax;
qtree->iextentx = 1.0/(qtree->xmax - qtree->xmin);
qtree->iextenty = 1.0/(qtree->ymax - qtree->ymin);
#if NDIM==3
qtree->zmin = zmin;
qtree->zmax = zmax;
qtree->iextentz = 1.0/(qtree->zmax - qtree->zmin);
#endif
/* add root quadrant */
root = (t_quadrant*)mempool.base_ptr;
root->level = ROOT_DEPTH;
root->x_code = 0;
root->y_code = 0;
#if NDIM==3
root->z_code = 0;
#endif
root->n_points = 0;
root->children[0] = (size_t)EMPTY_QUADRANT;
root->parent = (size_t)EMPTY_QUADRANT;
n_leaves = 1;
/* run */
n_qtree_points = 0;
for(i=0; i<n_points; i++){
/* memory pool can be reallocated in quadtree_add_node */
root = (t_quadrant*)mempool.base_ptr;
quadtree_add_node(root, points, i, n_leaf_points, &n_qtree_points, &mempool);
}
ntoc("actual work");
/* fill the memory header */
qtree = (t_quadtree*)mempool.head_ptr;
strncpy(qtree->name, QTREE_STR_ID, 8);
qtree->n_leaves = n_leaves;
qtree->n_quadrants = mempool.ptr;
qtree->n_leaf_points = n_leaf_points;
qtree->quadrant_size = mempool.quadrant_size;
qtree->n_points = n_qtree_points;
#if 0
if(n_qtree_points != n_points){
#if NDIM==3
MESSAGE("Some of the points were outside of the specified domain:\n\n\
(xmin=%.1e, xmax=%.1e, ymin=%.1e, ymax=%.1e, zmin=%.1e, zmax=%.1e)\n\n\
and were not added to the "QUADTREE_STR".\n \
Please specify correct domain extents.", xmin, xmax, ymin, ymax, zmin, zmax);
#else
MESSAGE("Some of the points were outside of the specified domain:\n\n\
(xmin=%.1e, xmax=%.1e, ymin=%.1e, ymax=%.1e)\n\nand were not added to the "QUADTREE_STR".\n \
Please specify correct domain extents.", xmin, xmax, ymin, ymax);
#endif
}
#endif
/* reallocate memory using MATLAB's allocation routines */
/* (copy into a MATLAB-global buffer and make it persistent, so the tree
 * survives across MEX calls; the original pool buffer is released) */
{
t_quadtree *_qtree;
mmalloc_global(_qtree, header_size + mempool.size*mempool.quadrant_size);
memcpy(_qtree, qtree, header_size + mempool.size*mempool.quadrant_size);
mfree(qtree, header_size + mempool.size*mempool.quadrant_size);
qtree = _qtree;
mpersistent(qtree, header_size + mempool.size*mempool.quadrant_size);
}
return qtree2mex(qtree, header_size + mempool.size*mempool.quadrant_size);
}
mxArray *mex_quadtree_locate(int nargin, const mxArray *pargin[])
{
size_t m, n;
char buff[256];
Uint arg = 1;
dimType *element_map = NULL;
dimType n_dim;
t_quadtree *tree = NULL;
t_mempool mempool = EMPTY_MEMPOOL_STRUCT;
t_mesh mesh = EMPTY_MESH_STRUCT;
Ulong n_markers;
Double *markers;
dimType *elids = NULL;
mxArray *outp = NULL;
t_opts opts;
if(!initialized){
initialized = 1;
mexAtExit(quadtree_mex_cleanup);
#ifdef ROBUST_PREDICATES
exactinit();
#endif
}
if(nargin<4){
USERERROR("Usage: [MAP, stats] = "QUADTREE_STR"('locate', "QUADTREE_STR", MESH, MARKERS, [MAP], [opts])",
MUTILS_INVALID_PARAMETER);
}
/* qtree structure */
tree = mex2qtree(pargin[arg++]);
mempool.head_ptr = (char*)tree;
mempool.base_ptr = mempool.head_ptr + header_size;
mempool.quadrant_size = tree->quadrant_size;
mempool.ptr = tree->n_quadrants;
/* triangular mesh structure */
mesh = mex2mesh(pargin[arg++], NDIM);
if(!mesh.neighbors){
USERERROR("MESH must contain NEIGHBORS information", MUTILS_INVALID_MESH);
return NULL;
}
/* MARKERS */
{
char _buff[10];
sprintf(_buff, "%d", NDIM);
m = NDIM;
n = 0;
markers = mex_get_matrix(Double, pargin[arg++], &m, &n, "MARKERS", _buff, "number of markers", 0);
}
SNPRINTF(buff, 255, "No dimensions of 'MARKERS' can be larger than %"PRI_ULONG, MaxUlong);
managed_type_cast(dimType, n_dim, m, buff);
managed_type_cast(Ulong, n_markers, n, buff);
/* optional - previous marker-to-element map to use */
if(nargin>=5){
m = 1;
n = n_markers;
element_map = mex_get_matrix(dimType, pargin[arg++], &m, &n, "MAP", "1", "number of markers", 1);
}
/* options */
if(nargin>=6){
opts = mex2opts(pargin[5]);
} else {
opts = mex2opts(NULL);
}
/* optional - inplace map. Existing MAP input will be overwritten and returned as output. */
/* Not allowed in MATLAB, so be careful and make sure MAP is not used elsewhere/linked to. */
if(opts.inplace && element_map){
opts.inplace = opts.inplace!=0;
}
if(opts.inplace){
outp = (mxArray *)pargin[4];
elids = element_map;
}
/* MEX output, needs to be global and persistent */
if(!outp){
mcalloc_global(elids, sizeof(dimType)*n_markers);
}
n_elems_searched = 0;
n_max_elems_searched = 0;
/* use default/environment defined number of threads */
parallel_set_num_threads(opts.nthreads);
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
Ulong i;
Uint thrid, nthr;
Ulong marker_start, marker_end;
dimType elid = EMPTY_ELID;
Ulong nel_searched;
t_quadrant *quadrant = NULL;
Ulong thr_n_elems_searched = 0;
Ulong thr_n_max_elems_searched = 0;
t_quadrant *root = NULL;
Ulong blk_size;
Ulong *map;
mcalloc(map, sizeof(Ulong)*mesh.n_elems);
/* locate the markers in the elements using the quadtree */
root = (t_quadrant*)mempool.base_ptr;
parallel_get_info(&thrid, &nthr);
if(opts.cpu_affinity) affinity_bind(thrid, opts.cpu_start + thrid);
blk_size = n_markers/nthr+1;
marker_start = blk_size*thrid;
marker_end = blk_size*(thrid+1);
marker_end = MIN(n_markers, marker_end);
/* global list initialization */
nlists = MAX(nlists, nthr);
if(slist[thrid]==NULL){
/* allocate a lot to avoid page sharing between threads */
mmalloc(slist[thrid], sizeof(dimType)*4096);
mmalloc(slist_size[thrid], sizeof(size_t)*4096);
slist_size[thrid][0] = 4096;
}
for(i=marker_start; i<marker_end; i++){
elid = EMPTY_ELID;
/* prefetch markers - non-temporal to make space */
/* for the qtree structure in the CPU caches */
/* if(i+16<marker_end) _mm_prefetch(((char*)markers)+(i+16), _MM_HINT_NTA); */
if(element_map){
elid = element_map[i];
if(elid<ONE_BASED_INDEX || elid-ONE_BASED_INDEX>=mesh.n_elems)
elid = EMPTY_ELID;
else
elid -= ONE_BASED_INDEX;
}
if(elid==EMPTY_ELID){
/* Locate the quadrant. */
/* quadrant is needed only to get some 'nearby' element id. */
/* The correct element containing the marker is located */
/* by searching the element neighbors. */
/* uptree traversal does not speed up things at all */
/* even if the input points are reasonably sorted */
/* quadrant = quadtree_locate_sorted(quadrant, markers[i*2+0], markers[i*2+1], &mempool); */
t_node_coords coords;
/* normalize coordinates */
coords.x = (markers[(size_t)i*NDIM+0] - tree->xmin)*tree->iextentx; //(tree->xmax - tree->xmin);
coords.y = (markers[(size_t)i*NDIM+1] - tree->ymin)*tree->iextenty; //(tree->ymax - tree->ymin);
#if NDIM==3
coords.z = (markers[(size_t)i*NDIM+2] - tree->zmin)*tree->iextentz; //(tree->zmax - tree->zmin);
#endif
quadrant = quadtree_locate_codes(root, coords, &mempool);
if(quadrant){
/* Find a nearby node in the quadtree. */
/* If the given quadrant is empty, */
/* return a node stored in first non-empty parent */
elid = quadrant->point_id[0];
while(elid==EMPTY_ELID){
if(quadrant->parent == EMPTY_QUADRANT) break;
quadrant = (t_quadrant*)(quadrant->parent + mempool.base_ptr);
elid = quadrant->point_id[0];
}
}
}
/* find containing element */
/* NOTE: coordinate normalization not needed here since we do a mesh search, */
/* not a quadtree search. */
#if NDIM==3
elid = quadtree_locate_tet(elid, i+1, markers+(size_t)i*NDIM, mesh, map, &nel_searched, thrid);
#else
elid = quadtree_locate_tri(elid, i+1, markers+(size_t)i*NDIM, mesh, map, &nel_searched, thrid);
#endif
elids[i] = ONE_BASED_INDEX + elid;
thr_n_elems_searched += nel_searched;
thr_n_max_elems_searched = MAX(thr_n_max_elems_searched, nel_searched);
}
#ifdef USE_OPENMP
#pragma omp atomic
#endif
n_elems_searched += thr_n_elems_searched;
#ifdef USE_OPENMP
#pragma omp critical
#endif
n_max_elems_searched = MAX(n_max_elems_searched, thr_n_max_elems_searched);
mfree(map, sizeof(Ulong)*mesh.n_elems);
}
avg_elems_searched = (Double)n_elems_searched/n_markers;
if(!outp) outp = mex_set_matrix(dimType, elids, 1, n_markers);
return outp;
}
/*
 * quadtree('reorder', qtree): return the permutation that lists the tree's
 * points in Z-order (Morton order), as a 1 x n_points matrix.
 */
mxArray *mex_quadtree_reorder(int nargin, const mxArray *pargin[])
{
  t_quadtree *qtree = NULL;
  t_mempool pool = EMPTY_MEMPOOL_STRUCT;
  dimType *order;
  dimType n_extracted = 0;
  mxArray *result;

  if(!initialized){
    initialized = 1;
    mexAtExit(quadtree_mex_cleanup);
  }

  if(nargin<2){
    USERERROR("Usage: I = "QUADTREE_STR"('reorder', "QUADTREE_STR")", MUTILS_INVALID_PARAMETER);
  }

  /* rebuild the memory pool view over the serialized tree */
  qtree = mex2qtree(pargin[1]);
  pool.head_ptr = (char*)qtree;
  pool.base_ptr = pool.head_ptr + header_size;
  pool.quadrant_size = qtree->quadrant_size;
  pool.ptr = qtree->n_quadrants;

  /* MEX output, needs to be global and persistent */
  mmalloc_global(order, sizeof(dimType)*qtree->n_points);

  /* extract nodes in the Z-ordering */
  quadtree_extract_points((t_quadrant*)pool.base_ptr, order, &n_extracted, &pool);

  result = mex_set_matrix(dimType, order, 1, n_extracted);
  return result;
}
/*
 * quadtree('vtkwrite', qtree, [file_name])
 *
 * Dump the quadtree leaves as a legacy ASCII VTK unstructured grid
 * ("<file_name>.vtk", default name QUADTREE_STR) for visualization.
 * Every leaf becomes one quad (2D) or hexahedron (3D) cell, slightly
 * shrunk (3% per side) so that adjacent leaves remain distinguishable.
 * Cell data holds the number of points stored in each leaf.
 */
void mex_vtkwrite(int nargin, const mxArray *pargin[])
{
  dimType i;

  /* prepare vtk data */
  t_quadrant **tree_leaves;
  dimType itree_leaves = 0;
  Uint arg = 1;
  t_quadtree *tree;
  t_mempool mempool = EMPTY_MEMPOOL_STRUCT;
  t_quadrant *root;
  Double *vtk_nodes;
  dimType *vtk_elems;
  dimType *vtk_celld;
  dimType n_cells = 0;
  char fname[512];

  if(!initialized){
    initialized = 1;
    mexAtExit(quadtree_mex_cleanup);
  }

  if(nargin<2){
    USERERROR("Usage: "QUADTREE_STR"('vtkwrite', "QUADTREE_STR", [file_name])", MUTILS_INVALID_PARAMETER);
  }

  /* quadtree: rebuild the memory pool view over the serialized tree */
  tree = mex2qtree(pargin[arg++]);
  mempool.head_ptr = (char*)tree;
  mempool.base_ptr = mempool.head_ptr + header_size;
  mempool.quadrant_size = tree->quadrant_size;
  mempool.ptr = tree->n_quadrants;

  /* file name (optional third argument, max 511 characters) */
  if(nargin>2){
    if(!mxIsChar(pargin[arg])) USERERROR("'file_name' must be a string", MUTILS_INVALID_PARAMETER);
    if(0!=mxGetString(pargin[arg], fname, 511))
      USERERROR("file_name too long, can be maximum 511 characters.", MUTILS_INVALID_PARAMETER);
  } else {
    sprintf(fname, "%s", QUADTREE_STR);
  }

  root = (t_quadrant*)mempool.base_ptr;

  /* NOTE(review): the allocations and loops below use the file-global     */
  /* n_leaves, while quadtree_extract_leaves reports its count through     */
  /* itree_leaves — presumably the two agree; confirm against the builder. */
  mcalloc(tree_leaves, sizeof(t_quadrant*)*n_leaves);
  quadtree_extract_leaves(root, tree_leaves, &itree_leaves, &mempool);

  /* NCHILDREN corner nodes and one cell per leaf */
  mcalloc(vtk_nodes, sizeof(Double)*NCHILDREN*n_leaves*NDIM);
  mcalloc(vtk_elems, sizeof(dimType)*NCHILDREN*n_leaves);
  mcalloc(vtk_celld, sizeof(dimType)*n_leaves);

  for(i=0; i<n_leaves; i++){
    Double mix, miy, max, may;
    Double dx, dy;
#if NDIM==3
    Double miz, maz, dz;
#endif

    /* leaf extent and min corner in normalized [0,1] coordinates, */
    /* derived from the leaf's level and location codes            */
    dx = (1L<<(tree_leaves[i]->level))/MAX_VAL;
    dy = (1L<<(tree_leaves[i]->level))/MAX_VAL;
    mix = tree_leaves[i]->x_code/MAX_VAL;
    miy = tree_leaves[i]->y_code/MAX_VAL;
    max = dx + mix;
    may = dy + miy;

    /* shrink the cell by 3% per side so neighboring leaves do not touch */
    dx = dx*0.03;
    dy = dy*0.03;
#if NDIM==3
    dz = (1L<<(tree_leaves[i]->level))/MAX_VAL;
    miz = tree_leaves[i]->z_code/MAX_VAL;
    maz = dz + miz;
    dz = dz*0.03;
#endif

    /* four corner nodes of the (shrunk) quad, counter-clockwise */
    vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 0] = mix+dx;
    vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 1] = miy+dy;
    vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 0] = max-dx;
    vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 1] = miy+dy;
    vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 0] = max-dx;
    vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 1] = may-dy;
    vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 0] = mix+dx;
    vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 1] = may-dy;

    /* nodes are not shared between cells: connectivity is sequential */
    vtk_elems[i*NCHILDREN + 0] = i*NCHILDREN + 0;
    vtk_elems[i*NCHILDREN + 1] = i*NCHILDREN + 1;
    vtk_elems[i*NCHILDREN + 2] = i*NCHILDREN + 2;
    vtk_elems[i*NCHILDREN + 3] = i*NCHILDREN + 3;

#if NDIM==3
    /* add Z-coordinate to first 4 nodes */
    vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 2] = miz+dz;
    vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 2] = miz+dz;
    vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 2] = miz+dz;
    vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 2] = miz+dz;

    /* add 4 more nodes (top face of the hexahedron) */
    vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 0] = mix+dx;
    vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 1] = miy+dy;
    vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 2] = maz-dz;
    vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 0] = max-dx;
    vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 1] = miy+dy;
    vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 2] = maz-dz;
    vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 0] = max-dx;
    vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 1] = may-dy;
    vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 2] = maz-dz;
    vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 0] = mix+dx;
    vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 1] = may-dy;
    vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 2] = maz-dz;

    vtk_elems[i*NCHILDREN + 4] = i*NCHILDREN + 4;
    vtk_elems[i*NCHILDREN + 5] = i*NCHILDREN + 5;
    vtk_elems[i*NCHILDREN + 6] = i*NCHILDREN + 6;
    vtk_elems[i*NCHILDREN + 7] = i*NCHILDREN + 7;
#endif

    /* cell data: number of points stored in this leaf */
    vtk_celld[i] = tree_leaves[i]->n_points;
    n_cells += vtk_celld[i];
  }

#if NDIM==3
  vtk_write3d(fname, vtk_elems, vtk_nodes, vtk_celld, n_leaves*NCHILDREN, n_leaves, NCHILDREN);
#else
  vtk_write2d(fname, vtk_elems, vtk_nodes, vtk_celld, n_leaves*NCHILDREN, n_leaves, NCHILDREN);
#endif

  mfree(tree_leaves, sizeof(t_quadrant*)*n_leaves);
  mfree(vtk_nodes, sizeof(Double)*NCHILDREN*n_leaves*NDIM);
  mfree(vtk_elems, sizeof(dimType)*NCHILDREN*n_leaves);
  mfree(vtk_celld, sizeof(dimType)*n_leaves);
}
/*
 * Write a 2D quad mesh to the legacy ASCII VTK file "<model_name>.vtk".
 *
 *  elems    - cell connectivity, nnodel node ids per cell (4 for quads)
 *  nodes    - 2 coordinates per node; z is written as 0.0
 *  celldata - one integer per cell (points per quadrant)
 *  nnod     - number of nodes, nel - number of cells, nnodel - nodes/cell
 *
 * Returns 0 on success, -1 if the file could not be opened.
 * BUGFIX: the original never checked fopen() and dereferenced a NULL
 * stream when the file could not be created.
 */
Int vtk_write2d(char *model_name, dimType *elems, Double *nodes, dimType *celldata,
                dimType nnod, dimType nel, dimType nnodel)
{
  FILE *out_vtk;
  Ulong i;
  char file_name[512+4];

  sprintf(file_name, "%s.vtk", model_name);
  out_vtk = fopen(file_name, "w");
  if(out_vtk == NULL){
    return -1;
  }

  fprintf(out_vtk,"# vtk DataFile Version 3.0\n");
  fprintf(out_vtk,"my cool data\n");
  fprintf(out_vtk,"ASCII\n");
  fprintf(out_vtk,"DATASET UNSTRUCTURED_GRID\n");

  fprintf(out_vtk,"POINTS %"PRI_DIMTYPE" double\n", nnod);
  for (i=0;i<nnod;i++){
    fprintf(out_vtk,"%lf %lf 0.0\n", nodes[2*i+0], nodes[2*i+1]);
  }

  fprintf(out_vtk,"CELLS %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", nel, (1+nnodel)*nel);
  for (i=0;i<nel;i++){
    /* quad cells always have 4 nodes; nnodel is only used for the size count */
    fprintf(out_vtk,"4 %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n",
            elems[nnodel*i+0], elems[nnodel*i+1], elems[nnodel*i+2], elems[nnodel*i+3]);
  }

  /* VTK cell type 9 = VTK_QUAD */
  fprintf(out_vtk,"CELL_TYPES %"PRI_DIMTYPE"\n", nel);
  for (i=0;i<nel;i++){
    fprintf(out_vtk,"9\n");
  }

  fprintf(out_vtk,"CELL_DATA %"PRI_DIMTYPE"\n", nel);
  fprintf(out_vtk,"SCALARS n_nodes_in_quadrant long 1\n");
  fprintf(out_vtk,"LOOKUP_TABLE default\n");
  for (i=0;i<nel;i++){
    fprintf(out_vtk,"%"PRI_DIMTYPE"\n", celldata[i]);
  }

  /* fclose flushes buffered output; report failure to the caller */
  if(fclose(out_vtk) != 0){
    return -1;
  }
  return 0;
}
/*
 * Write a 3D hexahedral mesh to the legacy ASCII VTK file "<model_name>.vtk".
 *
 *  elems    - cell connectivity, nnodel node ids per cell (8 for hexahedra)
 *  nodes    - 3 coordinates per node
 *  celldata - one integer per cell (points per quadrant)
 *  nnod     - number of nodes, nel - number of cells, nnodel - nodes/cell
 *
 * Returns 0 on success, -1 if the file could not be opened.
 * BUGFIX: the original never checked fopen() and dereferenced a NULL
 * stream when the file could not be created.
 */
Int vtk_write3d(char *model_name, dimType *elems, Double *nodes, dimType *celldata,
                dimType nnod, dimType nel, dimType nnodel)
{
  FILE *out_vtk;
  Ulong i;
  char file_name[512+4];

  sprintf(file_name, "%s.vtk", model_name);
  out_vtk = fopen(file_name, "w");
  if(out_vtk == NULL){
    return -1;
  }

  fprintf(out_vtk,"# vtk DataFile Version 3.0\n");
  fprintf(out_vtk,"my cool data\n");
  fprintf(out_vtk,"ASCII\n");
  fprintf(out_vtk,"DATASET UNSTRUCTURED_GRID\n");

  fprintf(out_vtk,"POINTS %"PRI_DIMTYPE" double\n", nnod);
  for (i=0;i<nnod;i++){
    fprintf(out_vtk,"%lf %lf %lf\n", nodes[3*i+0], nodes[3*i+1], nodes[3*i+2]);
  }

  fprintf(out_vtk,"CELLS %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", nel, (1+nnodel)*nel);
  for (i=0;i<nel;i++){
    fprintf(out_vtk,"%"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE
            " %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n",
            nnodel,
            elems[nnodel*i+0], elems[nnodel*i+1], elems[nnodel*i+2], elems[nnodel*i+3],
            elems[nnodel*i+4], elems[nnodel*i+5], elems[nnodel*i+6], elems[nnodel*i+7]);
  }

  /* VTK cell type 12 = VTK_HEXAHEDRON */
  fprintf(out_vtk,"CELL_TYPES %"PRI_DIMTYPE"\n", nel);
  for (i=0;i<nel;i++){
    fprintf(out_vtk,"12\n");
  }

  fprintf(out_vtk,"CELL_DATA %"PRI_DIMTYPE"\n", nel);
  fprintf(out_vtk,"SCALARS n_nodes_in_quadrant long 1\n");
  fprintf(out_vtk,"LOOKUP_TABLE default\n");
  for (i=0;i<nel;i++){
    fprintf(out_vtk,"%"PRI_DIMTYPE"\n", celldata[i]);
  }

  /* fclose flushes buffered output; report failure to the caller */
  if(fclose(out_vtk) != 0){
    return -1;
  }
  return 0;
}
/*
 * Build a 1x1 MATLAB struct with the search statistics collected by the
 * most recent quadtree 'locate' call (the file-global counters).
 * Fields: n_elems_searched, avg_elems_searched, n_max_elems_searched,
 * list_size.
 */
mxArray *mex_quadtree_stats(void)
{
/* redefine the field count locally; the #define persists past this function */
#undef n_fieldnames
#define n_fieldnames 4
  const char *fieldnames[n_fieldnames] = {"n_elems_searched", "avg_elems_searched", "n_max_elems_searched", "list_size"};
  mxArray *outp = mxCreateStructMatrix(1, 1, n_fieldnames, fieldnames);
  mxArray *field;
  Uint n = 0;

  /* total number of elements visited across all markers */
  field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL);
  ((Ulong*)mxGetData(field))[0] = n_elems_searched;
  mxSetField(outp, 0, fieldnames[n++], field);

  /* average elements visited per marker */
  field = mxCreateNumericMatrix(1,1,mxDOUBLE_CLASS,mxREAL);
  ((double*)mxGetData(field))[0] = avg_elems_searched;
  mxSetField(outp, 0, fieldnames[n++], field);

  /* worst-case elements visited for a single marker */
  field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL);
  ((Ulong*)mxGetData(field))[0] = n_max_elems_searched;
  mxSetField(outp, 0, fieldnames[n++], field);

  /* size of thread 0's search list; -1 wraps to the maximum Ulong value */
  /* and serves as the "no lists allocated" sentinel                      */
  field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL);
  if(nlists){
    ((Ulong*)mxGetData(field))[0] = slist_size[0][0];
  } else {
    ((Ulong*)mxGetData(field))[0] = -1;
  }
  mxSetField(outp, 0, fieldnames[n++], field);

  return outp;
}
/*
 * MEX entry point: dispatch on the first argument (a command string) to
 * one of the quadtree sub-commands: 'create', 'vtkwrite', 'locate',
 * 'reorder'. Unknown commands raise a MUTILS_INVALID_PARAMETER error.
 */
void mexFunction(int nargout, mxArray *pargout [ ], int nargin, const mxArray *pargin[])
{
  char command[256];

  /* get machine epsilon */
  MACHEPS = macheps();

  if (nargin < 1) MEXHELP;

  /* first argument selects the sub-command */
  if(!mxIsChar(pargin[0])){
    USERERROR("command parameter must be a string", MUTILS_INVALID_PARAMETER);
  }
  mxGetString(pargin[0], command, 255);

  if(strcmp(command, "create") == 0){
    if(nargout > 0){
      pargout[0] = mex_quadtree_create(nargin, pargin);
    }
    DEBUG_STATISTICS;
    return;
  }

  if(strcmp(command, "vtkwrite") == 0){
    mex_vtkwrite(nargin, pargin);
    DEBUG_STATISTICS;
    return;
  }

  if(strcmp(command, "locate") == 0){
    if(nargout > 0){
      pargout[0] = mex_quadtree_locate(nargin, pargin);
    }
    if(nargout > 1){
      /* second output: search statistics struct */
      pargout[1] = mex_quadtree_stats();
    }
    DEBUG_STATISTICS;
    return;
  }

  if(strcmp(command, "reorder") == 0){
    if(nargout > 0){
      pargout[0] = mex_quadtree_reorder(nargin, pargin);
    }
    DEBUG_STATISTICS;
    return;
  }

  USERERROR("unknown command", MUTILS_INVALID_PARAMETER);
}
|
GB_binop__isne_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_uint32
// A.*B function (eWiseMult): GB_AemultB__isne_uint32
// A*D function (colscale): GB_AxD__isne_uint32
// D*A function (rowscale): GB_DxB__isne_uint32
// C+=B function (dense accum): GB_Cdense_accumB__isne_uint32
// C+=b function (dense accum): GB_Cdense_accumb__isne_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_uint32
// C=scalar+B GB_bind1st__isne_uint32
// C=scalar+B' GB_bind1st_tran__isne_uint32
// C=A+scalar GB_bind2nd__isne_uint32
// C=A'+scalar GB_bind2nd_tran__isne_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// The loop body is supplied by the shared template file.
GrB_Info GB_Cdense_ewise3_noaccum__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// kfirst_slice/klast_slice/pstart_slice partition B's entries among the
// ntasks parallel tasks (see GB_ek_slice.h, included above).
GrB_Info GB_Cdense_accumB__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // loop body provided by the shared subassign template
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// FIX: the original had two consecutive `return (GrB_SUCCESS)` statements
// (one inside the scalar block, one after it); the second was unreachable
// dead code and has been removed.
GrB_Info GB_Cdense_accumb__isne_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // result values are written directly into C's value array
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // result values are written directly into C's value array
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, entries present in A or B.
// TaskList/ntasks describe the parallel partition; the C_to_* arrays map
// C's vectors to those of M, A and B (see GB_add_template.c).
GrB_Info GB_AaddB__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, entries present in both A and B.
GrB_Info GB_AemultB__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISNE operator with the scalar x bound as the
// first argument. Element-wise: Cx [k] = (x != Bx [k]).
GrB_Info GB_bind1st__isne_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    const uint32_t xval = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (xval != Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISNE operator with the scalar y bound as the
// second argument. Element-wise: Cx [k] = (Ax [k] != y).
GrB_Info GB_bind2nd__isne_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    const uint32_t yval = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] != yval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// cij = op (x, aij); the macro is consumed by GB_unop_transpose.c
// (no typecasting is performed, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}

// C = op (x, A'): transpose A and apply the binary operator with the
// scalar x bound as the first argument.
GrB_Info GB_bind1st_tran__isne_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// cij = op (aij, y); the macro is consumed by GB_unop_transpose.c
// (no typecasting is performed, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}

// C = op (A', y): transpose A and apply the binary operator with the
// scalar y bound as the second argument.
GrB_Info GB_bind2nd_tran__isne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pr45784.c | /* PR c/45784 */
/* { dg-do run } */
/* Regression test body for PR c/45784: sizeof applied to a VLA must be
   evaluated correctly when it appears in the bounds of an OpenMP
   parallel-for loop. The exact constructs are the point of the test;
   do not simplify them. */
void
foo (int n)
{
  char *p, vla[2 * n];
  int i;

/* pointer-based loop whose upper bound is computed from sizeof (vla) */
#pragma omp parallel for
  for (p = vla; p < vla + (sizeof (vla) / sizeof (vla[0])); p++)
    *p = ' ';

/* verify every element of the VLA was written */
#pragma omp parallel for
  for (i = 0; i < 2 * n; i++)
    if (vla[i] != ' ')
      __builtin_abort ();
}
/* Same idea as foo, but the loop start, bound and increment are all built
   from the sizeof of several differently-sized VLAs; only the [n, 2n)
   slice of vla4 is written by the parallel loop. */
void
bar (int n)
{
  char *p, vla1[n], vla2[n * 2], vla3[n * 3], vla4[n * 4];
  int i;

  __builtin_memset (vla4, ' ', n * 4);

/* start = vla4 + n, end = vla4 + 3n - 2n + n = vla4 + 2n;
   sizeof (vla4) / sizeof (vla4) == 1, so p advances one byte per step */
#pragma omp parallel for
  for (p = vla4 + sizeof (vla1); p < vla4 + sizeof (vla3) - sizeof (vla2) + sizeof (vla1); p += sizeof (vla4) / sizeof (vla4))
    p[0] = '!';

/* elements in [n, 2n) must be '!', everything else untouched (' ') */
#pragma omp parallel for
  for (i = 0; i < n * 4; i++)
    if (vla4[i] != ((i >= n && i < 2 * n) ? '!' : ' '))
      __builtin_abort ();
}
/* Driver: run both VLA/OpenMP checks with a runtime-only size. */
int
main ()
{
  /* volatile prevents the compiler from treating n as a compile-time
     constant, so the VLA sizes stay genuinely variable */
  volatile int n;
  n = 128;
  foo (n);
  bar (n);
  return 0;
}
|
preprocess-1.c | /* { dg-do preprocess } */
/* Preprocessor test: OpenMP pragma clauses assembled from object-like (p)
   and function-like (P, S, F) macros, with the pragma spanning several
   backslash-continued lines. The expanded pragma is matched against the
   preprocessed output by the dg-final directive below; the code tokens
   must stay exactly as written. */
void foo (void)
{
  int i1, j1, k1;
/* 'p' expands to the directive name; P/S/F paste the '1' suffix onto
   their argument to form the clause variable names */
#define p parallel
#define P(x) private (x##1)
#define S(x) shared (x##1)
#define F(x) firstprivate (x##1)
#pragma omp p P(i) \
            S(j) \
            F(k)
  ;
}
/* { dg-final { scan-file preprocess-1.i "(^|\n)#pragma omp parallel private \\(i1\\) shared \\(j1\\) firstprivate \\(k1\\)($|\n)" } } */
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/fourier.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
  size_t
    count = 0;

  ssize_t
    i;

  /*
    Count the pixel channels carrying the Update trait; report at least
    one channel so callers never divide by zero.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    if ((GetPixelChannelTraits(image,channel) & UpdatePixelTrait) != 0)
      count++;
  }
  return(count == 0 ? (size_t) 1 : count);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance,
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
distance=pixel*pixel;
if (distance >= fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() counts, per channel, the number of pixels whose
  squared channel difference meets or exceeds the images' fuzzy color
  distance threshold.  Counts accumulate into distortion[0..MaxPixelChannels];
  the CompositePixelChannel slot counts pixels that differ in ANY channel.
  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  /*
    Iterate over the larger geometry; virtual pixels supply values where the
    smaller image has none.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) totals */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /* Skip pixels masked out (read mask at or below 50%) in either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() computes the root-mean-squared channel error ("fuzz")
  between image and reconstruct_image.  Squared QuantumScale-normalized
  differences accumulate per channel into distortion[]; on return each slot is
  averaged over the compared pixel count and the composite slot is averaged
  over the channel count and square-rooted.

  Fix: the NULL check for q used an inconsistent `(Quantum *) NULL' cast;
  it now uses `(const Quantum *) NULL' like every sibling check in this file
  (q is a pointer to const).
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) sums */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Convert sums to means; PerceptibleReciprocal() guards area == 0. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() computes the mean absolute error (MAE) between
  image and reconstruct_image, normalized to [0,1] via QuantumScale.
  Absolute differences accumulate per channel into distortion[]; on return
  each slot is averaged over the compared pixel count and the composite slot
  additionally over the channel count.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) sums */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Convert sums to means; PerceptibleReciprocal() guards area == 0. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() computes the mean error per pixel, the normalized
  mean error, and the normalized maximum error between image and
  reconstruct_image, writing the results into image->error (hence the
  non-const image parameter).  distortion[] receives the raw per-channel
  absolute-error sums.  Note: this routine runs serially (no OpenMP), and
  `area' counts channel samples, not pixels.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        area++;  /* one sample per compared channel */
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() guards against area == 0. */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() computes the mean squared error (MSE) between
  image and reconstruct_image, normalized to [0,1] via QuantumScale.
  Squared differences accumulate per channel into distortion[]; on return
  each slot is averaged over the compared pixel count and the composite slot
  additionally over the channel count.

  Fix: the final composite division now casts GetImageChannels() to double,
  matching the identical expression in GetFuzzDistortion() and
  GetMeanAbsoluteDistortion() (consistency; the implicit conversion was
  already to double).
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) sums */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Convert sums to means; PerceptibleReciprocal() guards area == 0. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized
  cross-correlation (NCC) between image and reconstruct_image: for each
  channel, the mean-centered covariance of the two images divided by the
  product of their standard deviations.  The composite slot receives the
  root-mean-square of the per-channel correlations.  Runs in two serial
  passes: the first counts unmasked pixels (area), the second accumulates
  the covariance.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    channels,
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was successfully acquired. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    Pass 1: count the pixels that are unmasked in both images.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /*
    Pass 2: accumulate the mean-centered covariance per channel.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distortion[i]+=area*QuantumScale*((double) p[i]-
            image_statistics[channel].mean)*(GetPixelChannel(reconstruct_image,
            channel,q)-reconstruct_statistics[channel].mean);
        else
          distortion[i]+=area*QuantumScale*(Sa*p[i]-
            image_statistics[channel].mean)*(Da*GetPixelChannel(
            reconstruct_image,channel,q)-reconstruct_statistics[channel].mean);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  channels=0;
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    if (fabs(gamma) >= MagickEpsilon)
      {
        gamma=PerceptibleReciprocal(gamma);
        distortion[i]=QuantumRange*gamma*distortion[i];
        distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
        channels++;
      }
  }
  /* NOTE(review): if every channel pair has near-zero standard deviation,
     channels stays 0 and this divides by zero — confirm whether callers can
     reach that case (e.g. two constant images). */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    channels);
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() computes the peak (maximum) absolute per-channel
  error between image and reconstruct_image, normalized to [0,1] via
  QuantumScale.  Each distortion[] slot holds the maximum difference observed
  for that channel; the composite slot holds the maximum over all channels.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) maxima */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares directly; other channels are weighted by alpha. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() derives PSNR from the mean squared error:
  channels with an MSE of zero (within epsilon) are assigned an infinite
  ratio; otherwise the MSE is converted to decibels relative to unit signal
  power.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    double
      mean_squared_error;

    mean_squared_error=distortion[channel];
    if (fabs(mean_squared_error) < MagickEpsilon)
      {
        distortion[channel]=INFINITY;
        continue;
      }
    distortion[channel]=10.0*MagickLog10(1.0)-
      10.0*MagickLog10(mean_squared_error);
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image moments
  across colorspaces) of the two images and accumulates the per-channel
  difference into distortion[]; the composite slot receives the sum over all
  channels.  The "phash:normalize" artifact selects normalized (RMS-style)
  accumulation instead of raw squared differences.

  Fix: in the normalize branch the accumulator was assigned (`difference=')
  instead of accumulated (`difference+='), so only the last
  colorspace/moment pair contributed; it now sums like the other branch.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          difference+=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes RMSE: the square root of the
  per-channel mean squared error.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM, Wang et al.) between image and reconstruct_image using a
  Gaussian-weighted sliding window.  Per-channel SSIM values accumulate into
  distortion[] and are averaged over the windowed-pixel count on return; the
  composite slot is additionally averaged over the channel count.  Window
  radius/sigma and the K1/K2 stabilizing constants are tunable via
  "compare:ssim-*" artifacts.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    area,
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  /* Build the Gaussian weighting kernel for the sliding window. */
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Stabilizing constants c1=(K1*L)^2, c2=(K2*L)^2. */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) sums */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a window-height band centered on row y, widened by the kernel so
      the sliding window never reads out of bounds.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      /* NOTE(review): the next memset re-clears x_pixel_sigma_squared
         (already cleared above); it was presumably meant for one of the
         y-arrays, both of which are cleared just below, so it is redundant
         but harmless (all arrays share the same size). */
      (void) memset(x_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      /*
        Accumulate Gaussian-weighted means, variances, and covariance over
        the kernel window.
      */
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /* Advance to the next window row within the widened band. */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /*
        Evaluate the SSIM formula for each channel of this window.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp atomic
#endif
      area++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /* Average the accumulated SSIM over the windowed-pixel count. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=area;
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Structural dissimilarity (DSSIM) is derived from the structural
    similarity (SSIM) metric: DSSIM = (1-SSIM)/2, applied per channel and
    to the composite slot.
  */
  MagickBooleanType
    status;

  ssize_t
    j;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]=(1.0-distortion[j])/2.0;
  return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Compare the pixel channels of image to reconstruct_image and return the
    overall distortion for the requested metric in *distortion.  Per-channel
    values are computed internally and discarded; only the composite
    (all-channel) value is returned to the caller and recorded as the
    "distortion" image property.  Returns MagickTrue on success.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* one slot per channel plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /*
    Dispatch to the metric-specific helper; each fills channel_distortion.
    Note status is assigned on every path because the default label is fused
    with NormalizedCrossCorrelationErrorMetric.
  */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  /*
    Report only the composite slot; also stash it as an image property for
    later retrieval (e.g. by the compare utility).
  */
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  /*
    Compare the pixel channels of image to reconstruct_image and return the
    requested distortion metric for each channel, with the composite value
    at index CompositePixelChannel.  The caller owns the returned buffer and
    must free it with RelinquishMagickMemory().  Returns NULL on failure.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* one slot per channel plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously invoked GetRootMeanSquaredDistortion
        (copy-paste error), so per-channel PHASH results disagreed with the
        sibling GetImageDistortion(), which correctly dispatches to
        GetPerceptualHashDistortion.
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  /*
    Compare the two images pixel by pixel and return MagickTrue only when
    every comparable channel of every pixel matches to within MagickEpsilon.
    Exits early on the first mismatch: each inner `break` cascades outward
    via the post-loop index checks (i, then x, then the final y < rows test).
  */
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Iterate over the union of both geometries; virtual views supply pixels
    beyond either image's actual extent.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;  /* pixel cache failure: treated as "not equal" below */
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        /* Skip channels missing from either image or not flagged for
           update in the reconstruct image. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;  /* mismatch: propagate out through the index checks */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* y only falls short of rows when some break fired above. */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  /*
    Measure the per-pixel color difference between two images and record the
    mean, normalized-mean, and normalized-maximum errors in image->error.
    Returns MagickTrue when the images match exactly (zero mean error).
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Iterate over the union of both geometries; virtual views supply pixels
    beyond either image's actual extent.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        /* Skip channels missing from either image or not flagged for
           update in the reconstruct image. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;  /* counts channel samples, not pixels */
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Bug fix: guard against a zero sample count (e.g. no comparable channels
    or an immediate cache failure), which previously produced NaN error
    metrics from the 0/0 divisions below.
  */
  if (area == 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() searches the image for the region that best matches the
% reference image and returns the best match offset.  In addition, it returns
% a similarity image such that an exact match location is completely white
% and, if none of the pixels match, black; otherwise some gray level
% in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
static Image *CrossCorrelationImage(const Image *alpha_image,
  const Image *beta_image,ExceptionInfo *exception)
{
  /*
    Compute the cross correlation of two images via the frequency domain:
    IFT(FFT(alpha) * conj(FFT(beta))).  Returns a newly allocated image the
    caller must destroy, or NULL on failure.
  */
  Image
    *clone_image,
    *complex_conjugate,
    *complex_multiplication,
    *cross_correlation,
    *fft_images;

  /*
    Take the FFT of beta image.
  */
  clone_image=CloneImage(beta_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(clone_image);
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,
    exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    return(fft_images);
  /*
    Take the complex conjugate of beta image.
  */
  complex_conjugate=ComplexImages(fft_images,ConjugateComplexOperator,
    exception);
  fft_images=DestroyImageList(fft_images);
  if (complex_conjugate == (Image *) NULL)
    return(complex_conjugate);
  /*
    Take the FFT of the alpha image.
  */
  clone_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(clone_image);
    }
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(fft_images);
    }
  /* Chain alpha's FFT pair onto the conjugate list for ComplexImages. */
  complex_conjugate->next->next=fft_images;
  /*
    Do complex multiplication.
  */
  (void) SetImageArtifact(complex_conjugate,"compose:clamp","false");
  complex_multiplication=ComplexImages(complex_conjugate,
    MultiplyComplexOperator,exception);
  complex_conjugate=DestroyImageList(complex_conjugate);
  /*
    Bug fix: check the multiplication result, not the stale fft_images
    pointer.  fft_images was linked into complex_conjugate's list and
    destroyed with it above, so the old test read a dangling pointer and
    could return it to the caller.
  */
  if (complex_multiplication == (Image *) NULL)
    return((Image *) NULL);
  /*
    Do the IFT and return the cross-correlation result.
  */
  cross_correlation=InverseFourierTransformImage(complex_multiplication,
    complex_multiplication->next,MagickFalse,exception);
  complex_multiplication=DestroyImageList(complex_multiplication);
  return(cross_correlation);
}
static Image *NCCDivideImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  /*
    Divide one image into another: returns a clone of alpha_image whose
    updatable channels are divided (in normalized space) by the corresponding
    beta_image channels.  Returns NULL on failure; caller owns the result.
  */
  CacheView
    *alpha_view,
    *beta_view;

  Image
    *divide_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Divide one image into another.
  */
  divide_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (divide_image == (Image *) NULL)
    return(divide_image);
  status=MagickTrue;
  alpha_view=AcquireAuthenticCacheView(divide_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,divide_image,divide_image->rows,1)
#endif
  for (y=0; y < (ssize_t) divide_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(alpha_view,0,y,divide_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) divide_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(divide_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(divide_image,i);
        PixelTrait traits = GetPixelChannelTraits(divide_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Divide only where the denominator is meaningfully non-zero;
           PerceptibleReciprocal additionally guards tiny values. */
        if (fabs(p[i]) >= MagickEpsilon)
          q[i]*=PerceptibleReciprocal(QuantumScale*p[i]);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(divide_image);
    }
    if (SyncCacheViewAuthenticPixels(alpha_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  alpha_view=DestroyCacheView(alpha_view);
  if (status == MagickFalse)
    divide_image=DestroyImage(divide_image);
  return(divide_image);
}
static MagickBooleanType NCCMaximaImage(const Image *image,double *maxima,
  RectangleInfo *offset,ExceptionInfo *exception)
{
  /*
    Identify the maxima value in the image and its location: scan every
    pixel, average its updatable channels, and record the largest average in
    *maxima with its (x,y) position in *offset.  Read-only; returns
    MagickFalse only on a pixel-cache failure.
  */
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  *maxima=0.0;
  offset->x=0;
  offset->y=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        sum = 0.0;

      ssize_t
        channels = 0,
        i;

      /*
        Average the updatable channels at this pixel.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        sum+=p[i];
        channels++;
      }
      if ((channels != 0) && ((sum/channels) > *maxima))
        {
          *maxima=sum/channels;
          offset->x=x;
          offset->y=y;
        }
      p+=GetPixelChannels(image);
    }
    /*
      Bug fix: the previous code called SyncCacheViewAuthenticPixels() here,
      but image_view is a *virtual* (read-only) view and no pixels are
      modified; syncing authentic pixels on it is invalid and could
      spuriously fail the whole similarity computation.
    */
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
static MagickBooleanType NCCMultiplyImage(Image *image,const double factor,
  const ChannelStatistics *channel_statistics,ExceptionInfo *exception)
{
  /*
    Multiply each updatable channel of the image, in place, by a constant
    factor and (optionally, when channel_statistics is non-NULL) by that
    channel's normalized standard deviation.  Returns MagickFalse on a
    pixel-cache failure.
  */
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Multiply each pixel by a factor.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Optional per-channel scaling by the reference's std. deviation. */
        if (channel_statistics != (const ChannelStatistics *) NULL)
          q[i]*=QuantumScale*channel_statistics[channel].standard_deviation;
        q[i]*=factor;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
static Image *NCCSquareImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Square each pixel in the image: returns a clone whose updatable channels
    hold q*q scaled by QuantumScale (keeping values in quantum range).
    Returns NULL on failure; caller owns the result.
  */
  CacheView
    *image_view;

  Image
    *square_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Square each pixel in the image.
  */
  square_image=CloneImage(image,0,0,MagickTrue,exception);
  if (square_image == (Image *) NULL)
    return(square_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(square_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(square_image,square_image,square_image->rows,1)
#endif
  for (y=0; y < (ssize_t) square_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,square_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) square_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(square_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(square_image,i);
        PixelTrait traits = GetPixelChannelTraits(square_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* q = q * (q/QuantumRange): square in normalized space. */
        q[i]*=QuantumScale*q[i];
      }
      q+=GetPixelChannels(square_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    square_image=DestroyImage(square_image);
  return(square_image);
}
static Image *NCCSubtractImageMean(const Image *alpha_image,
  const Image *beta_image,const ChannelStatistics *channel_statistics,
  ExceptionInfo *exception)
{
  /*
    Subtract the image mean and pad: returns an image with alpha_image's
    geometry whose pixels, inside beta_image's extent, are beta's pixels
    minus the per-channel mean from channel_statistics, and zero in the
    padding region outside beta's extent.  Returns NULL on failure; caller
    owns the result.
  */
  CacheView
    *beta_view,
    *image_view;

  Image
    *gamma_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Subtract the image mean and pad.
  */
  gamma_image=CloneImage(beta_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (gamma_image == (Image *) NULL)
    return(gamma_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(gamma_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,gamma_image,gamma_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gamma_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,gamma_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gamma_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(gamma_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(gamma_image,i);
        PixelTrait traits = GetPixelChannelTraits(gamma_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Zero-pad outside beta's extent; otherwise subtract the mean. */
        if ((x >= (ssize_t) beta_image->columns) ||
            (y >= (ssize_t) beta_image->rows))
          q[i]=(Quantum) 0;
        else
          q[i]=p[i]-channel_statistics[channel].mean;
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(gamma_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    gamma_image=DestroyImage(gamma_image);
  return(gamma_image);
}
static Image *NCCUnityImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  /*
    Create a padded unity image: alpha_image's geometry, with every
    updatable channel set to QuantumRange inside beta_image's extent and
    zero in the padding region outside it.  Returns NULL on failure; the
    caller owns the result.
  */
  CacheView
    *unity_view;

  Image
    *unity_image;

  MagickBooleanType
    status;

  ssize_t
    row;

  unity_image=CloneImage(alpha_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (unity_image == (Image *) NULL)
    return(unity_image);
  status=MagickTrue;
  unity_view=AcquireAuthenticCacheView(unity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(unity_image,unity_image,unity_image->rows,1)
#endif
  for (row=0; row < (ssize_t) unity_image->rows; row++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      column;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(unity_view,0,row,unity_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (column=0; column < (ssize_t) unity_image->columns; column++)
    {
      ssize_t
        n;

      for (n=0; n < (ssize_t) GetPixelChannels(unity_image); n++)
      {
        PixelChannel channel = GetPixelChannelChannel(unity_image,n);
        PixelTrait traits = GetPixelChannelTraits(unity_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Unity inside beta's extent, zero in the padding region. */
        if ((column >= (ssize_t) beta_image->columns) ||
            (row >= (ssize_t) beta_image->rows))
          q[n]=(Quantum) 0;
        else
          q[n]=QuantumRange;
      }
      q+=GetPixelChannels(unity_image);
    }
    if (SyncCacheViewAuthenticPixels(unity_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  unity_view=DestroyCacheView(unity_view);
  if (status == MagickFalse)
    unity_image=DestroyImage(unity_image);
  return(unity_image);
}
static Image *NCCVarianceImage(Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  /*
    Compute the variance of the two images: returns a clone of alpha_image
    whose updatable channels hold a scaled sqrt of the (absolute, normalized)
    difference q-p.  NOTE(review): the exact normalization — division by
    sqrt(QuantumRange) after ClampToQuantum — follows Fred Weinhaus's NCC
    formulation; confirm against the contributing script before altering.
    Returns NULL on failure; caller owns the result.
  */
  CacheView
    *beta_view,
    *image_view;

  Image
    *variance_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Compute the variance of the two images.
  */
  variance_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (variance_image == (Image *) NULL)
    return(variance_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(variance_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,variance_image,variance_image->rows,1)
#endif
  for (y=0; y < (ssize_t) variance_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,variance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) variance_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(variance_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(variance_image,i);
        PixelTrait traits = GetPixelChannelTraits(variance_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum((QuantumRange*sqrt(fabs((double) QuantumScale*
          (q[i]-p[i])))))/sqrt((double) QuantumRange);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(variance_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    variance_image=DestroyImage(variance_image);
  return(variance_image);
}
static Image *NCCSimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define DestroySimilarityResources() \
{ \
  if (channel_statistics != (ChannelStatistics *) NULL) \
    channel_statistics=(ChannelStatistics *) \
      RelinquishMagickMemory(channel_statistics); \
  if (beta_image != (Image *) NULL) \
    beta_image=DestroyImage(beta_image); \
  if (gamma_image != (Image *) NULL) \
    gamma_image=DestroyImage(gamma_image); \
  if (ncc_image != (Image *) NULL) \
    ncc_image=DestroyImage(ncc_image); \
  if (normalize_image != (Image *) NULL) \
    normalize_image=DestroyImage(normalize_image); \
  if (square_image != (Image *) NULL) \
    square_image=DestroyImage(square_image); \
  if (unity_image != (Image *) NULL) \
    unity_image=DestroyImage(unity_image); \
}
#define ThrowSimilarityException() \
{ \
  DestroySimilarityResources() \
  return((Image *) NULL); \
}

  ChannelStatistics
    *channel_statistics = (ChannelStatistics *) NULL;

  double
    maxima = 0.0;

  Image
    *beta_image = (Image *) NULL,
    *correlation_image = (Image *) NULL,
    *gamma_image = (Image *) NULL,
    *ncc_image = (Image *) NULL,
    *normalize_image = (Image *) NULL,
    *square_image = (Image *) NULL,
    *unity_image = (Image *) NULL;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Accelerated correlation-based image similarity using FFT local
    statistics.  Contributed by Fred Weinhaus.  Returns the correlation
    image (best match at *offset, score in *similarity_metric) or NULL on
    failure.
  */
  square_image=NCCSquareImage(image,exception);
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  unity_image=NCCUnityImage(image,reference,exception);
  if (unity_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the square and unity images.
  */
  ncc_image=CrossCorrelationImage(square_image,unity_image,exception);
  square_image=DestroyImage(square_image);  /* bug fix: removed stray '\' */
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(ncc_image,(double) QuantumRange*reference->columns*
    reference->rows,(const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the source and unity images.
  */
  gamma_image=CrossCorrelationImage(image,unity_image,exception);
  unity_image=DestroyImage(unity_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  square_image=NCCSquareImage(gamma_image,exception);
  gamma_image=DestroyImage(gamma_image);
  /* Bug fix: NCCSquareImage can fail; previously its NULL result was passed
     straight to NCCMultiplyImage. */
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(square_image,(double) QuantumRange,
    (const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the variance of the two images.
  */
  gamma_image=NCCVarianceImage(ncc_image,square_image,exception);
  square_image=DestroyImage(square_image);
  ncc_image=DestroyImage(ncc_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  channel_statistics=GetImageStatistics(reference,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    ThrowSimilarityException();
  /*
    Subtract the image mean.
  */
  status=NCCMultiplyImage(gamma_image,1.0,channel_statistics,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  normalize_image=NCCSubtractImageMean(image,reference,channel_statistics,
    exception);
  if (normalize_image == (Image *) NULL)
    ThrowSimilarityException();
  ncc_image=CrossCorrelationImage(image,normalize_image,exception);
  normalize_image=DestroyImage(normalize_image);
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Divide the two images.
  */
  beta_image=NCCDivideImage(ncc_image,gamma_image,exception);
  ncc_image=DestroyImage(ncc_image);
  gamma_image=DestroyImage(gamma_image);
  if (beta_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(beta_image,"0x0+0+0");
  SetGeometry(image,&geometry);
  geometry.width=image->columns-reference->columns;
  geometry.height=image->rows-reference->rows;
  /*
    Crop padding.
  */
  correlation_image=CropImage(beta_image,&geometry,exception);
  beta_image=DestroyImage(beta_image);
  if (correlation_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(correlation_image,"0x0+0+0");
  /*
    Identify the maxima value in the image and its location.
  */
  status=GrayscaleImage(correlation_image,AveragePixelIntensityMethod,
    exception);
  if (status == MagickFalse)
    {
      /* Bug fix: correlation_image leaked on this path; the cleanup macro
         does not track it. */
      correlation_image=DestroyImage(correlation_image);
      ThrowSimilarityException();
    }
  status=NCCMaximaImage(correlation_image,&maxima,offset,exception);
  if (status == MagickFalse)
    {
      correlation_image=DestroyImage(correlation_image);
      ThrowSimilarityException();
    }
  *similarity_metric=1.0-QuantumScale*maxima;
  DestroySimilarityResources();
  return(correlation_image);
}
#endif
/*
  Crop a reference-sized window out of image at (x_offset,y_offset) and
  return its distortion against reference under the given metric.
  Returns 0.0 when the crop fails or the distortion computation reports
  failure; otherwise returns the metric value filled in by
  GetImageDistortion().
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    metric_value = 0.0;

  Image
    *window;

  MagickBooleanType
    ok;

  RectangleInfo
    crop_area;

  /* window has the reference's extent, positioned at the probe offset */
  SetGeometry(reference,&crop_area);
  crop_area.x=x_offset;
  crop_area.y=y_offset;
  window=CropImage(image,&crop_area,exception);
  if (window == (Image *) NULL)
    return(0.0);
  ok=GetImageDistortion(window,reference,metric,&metric_value,exception);
  window=DestroyImage(window);
  return(ok == MagickFalse ? 0.0 : metric_value);
}
/*
  SimilarityImage() slides `reference` over every valid (x,y) placement
  within `image`, scores each placement with GetSimilarityMetric(), tracks
  the best score and its location in *offset / *similarity_metric, and
  returns a "similarity map" image whose pixels encode the per-placement
  score.  The scan short-circuits once a score <= similarity_threshold is
  observed.  The caller owns the returned image; NULL is returned on
  allocation failure.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  /* worst possible score until something better is found */
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
  {
    /*
      FFT-accelerated fast path for the NCC metric; enabled unless the
      "compare:accelerate-ncc" artifact is explicitly set false.
    */
    const char *artifact = GetImageArtifact(image,"compare:accelerate-ncc");
    MagickBooleanType accelerate = (artifact != (const char *) NULL) &&
      (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
    if ((accelerate != MagickFalse) &&
        (metric == NormalizedCrossCorrelationErrorMetric))
      {
        similarity_image=NCCSimilarityImage(image,reference,metric,
          similarity_threshold,offset,similarity_metric,exception);
        return(similarity_image);
      }
  }
#endif
  /*
    NOTE(review): columns/rows are unsigned; the +1 extents below assume
    reference is no larger than image in both dimensions — confirm callers
    validate this (otherwise the subtraction underflows).
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    /* re-read the best-so-far metric published by other threads */
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* good-enough match already found: skip this row */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* for correlation-style metrics, convert "similarity" to distance */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      /* serialize updates to the best match (offset + metric) */
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      /* rescale PHASH distortion into [0,1] for the map pixel below */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /* write the placement's score into every updatable channel */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
mandelbrot_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
// USAGE: mandelbrot_omp <rows> <cols> <x0> <y0> <dx> <dy>
// OUTPUT: PERFORMANCE IN TIME SPENT
#define TRIALS 2
#define OMP_CHUNK_SIZE 100
/* Current wall-clock (real) time in nanoseconds, as a double. */
double get_time()
{
    struct timespec now;
    /* TIME_UTC is the same realtime clock as CLOCK_REALTIME (C11 API) */
    timespec_get(&now, TIME_UTC);
    return 1.0e9 * (double)now.tv_sec + (double)now.tv_nsec;
}
/*
 * Render a Mandelbrot iteration-count grid in parallel with OpenMP,
 * report the fastest of TRIALS runs, and dump the grid to
 * results/mandelbrot_omp.dat.
 * argv: cols rows [x0] [y0] [dx] [dy]; returns 0 on success, 1 on error.
 */
int main(int argc, char **argv)
{
    FILE *fp;
    int rows, cols, size, i, j, k, max_iteration, *grid;
    double ttot, tstart, tend, tmin;
    char filename[] = "results/mandelbrot_omp.dat";
    if (argc < 3)
    {
        printf("Usage: mandelbrot_omp cols rows\n");
        return 1;
    }
    cols = atoi(argv[1]);
    rows = atoi(argv[2]);
    size = rows * cols;
    if (rows < 2 || cols < 2)
    {
        /* BUG FIX: message said "> 2" but the check rejects values < 2 */
        printf("Error: cols and rows must be >= 2\n");
        return 1;
    }
    max_iteration = 100;
    /* viewport: (x0,y0) lower-left corner, dx/dy extents, with defaults */
    double xmin = argc > 3 ? atof(argv[3]) : -2.5;
    double ymin = argc > 4 ? atof(argv[4]) : -1;
    double xmax = argc > 5 ? xmin + atof(argv[5]) : 1;
    double ymax = argc > 6 ? ymin + atof(argv[6]) : 1;
    if (xmin >= xmax || ymin >= ymax)
    {
        /* BUG FIX: usage line named the MPI variant of this program */
        printf("Usage: mandelbrot_omp cols rows x0=-2.5 y0=-1 dx=-1 dy=1\n");
        return 1;
    }
    grid = (int *)malloc(size * sizeof(int));
    if (grid == NULL)
    {
        printf("Error: out of memory\n");
        return 1;
    }
    /*
     * BUG FIX: tmin was reset to 10e10 INSIDE the trial loop, so it always
     * ended up holding the LAST trial's time rather than the minimum
     * across trials.  Initialize it once, before the loop.
     */
    tmin = 10e10;
    for (k = 0; k < TRIALS; k++)
    {
        tstart = get_time();
#pragma omp parallel for schedule(static, OMP_CHUNK_SIZE)
        for (i = 0; i < size; i++)
        {
            /* flattened index -> pixel: x varies along the rows dimension */
            int px = i % rows;
            int py = i / rows;
            double x0 = (double)px / (rows - 1) * (xmax - xmin) + xmin;
            double y0 = (double)py / (cols - 1) * (ymax - ymin) + ymin;
            double x = 0;
            double y = 0;
            int iteration = 0;
            /* escape-time iteration: z <- z^2 + c, bailout at |z| >= 2 */
            while (x * x + y * y < 2 * 2 && iteration < max_iteration)
            {
                double xtemp = x * x - y * y + x0;
                y = 2 * x * y + y0;
                x = xtemp;
                iteration++;
            }
            grid[i] = iteration;
        }
        tend = get_time();
        ttot = tend - tstart;
        if (ttot < tmin)
            tmin = ttot;
    }
    /*
     * NOTE(review): 10e6 == 1e7, so this prints units of 10 ms; if plain
     * milliseconds were intended the divisor should be 1.0e6 — confirm
     * before changing the output format.
     */
    printf("%.2lf\n", tmin / 10e6);
    fp = fopen(filename, "w");
    if (fp == NULL)
    {
        /* ROBUSTNESS: previously fprintf dereferenced a NULL stream when
           the results/ directory was missing */
        perror("fopen");
        free(grid);
        return 1;
    }
    fprintf(fp, "%.2lf %.2lf %.2lf %.2lf\n", xmin, ymin, xmax - xmin, ymax - ymin);
    for (i = 0; i < cols; i++)
    {
        for (j = 0; j < rows; j++)
        {
            fprintf(fp, "%i ", grid[rows * i + j]);
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
    free(grid);
    return 0;
}
IO.h | // This code is part of the project "Ligra: A Lightweight Graph Processing
// Framework for Shared Memory", presented at Principles and Practice of
// Parallel Programming, 2013.
// Copyright (c) 2013 Julian Shun and Guy Blelloch
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cmath>
#include <sys/mman.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <parallel/algorithm>
#include <omp.h>
#include <cassert>
#include "parallel.h"
#include "blockRadixSort.h"
#include "quickSort.h"
#include "utils.h"
#include "graph.h"
#include "pvector.h"
#include "timer.h"
#include "sliding_queue.h"
using namespace std;
// Edge tuple types used while building the transpose (see temp[] below):
//   intPair   = (destination, source)            for unweighted edges
//   intTriple = (destination, (source, weight))  for weighted edges
typedef pair<uintE,uintE> intPair;
typedef pair<uintE, pair<uintE,intE> > intTriple;
template <class E>
struct pairFirstCmp {
bool operator() (pair<uintE,E> a, pair<uintE,E> b) {
return a.first < b.first; }
};
// Projection functor: yields the first component of a pair (the sort key
// for intSort::iSort).
template <class E>
struct getFirst {
  uintE operator() (pair<uintE,E> p) { return p.first; }
};
template <class IntType>
struct pairBothCmp {
bool operator() (pair<uintE,IntType> a, pair<uintE,IntType> b) {
if (a.first != b.first) return a.first < b.first;
return a.second < b.second;
}
};
// A structure that keeps a sequence of strings all allocated from
// the same block of memory.  del() frees both the character block and
// the pointer array; the struct owns both once constructed.
struct words {
  long n; // total number of characters
  char* Chars; // array storing all strings
  long m; // number of substrings
  char** Strings; // pointers to strings (all should be null terminated)
  words() {}
  // FIX: member initializers now listed in declaration order (n, Chars,
  // m, Strings).  C++ always initializes members in declaration order,
  // so the previous out-of-order list was misleading and triggered
  // -Wreorder; behavior is unchanged because the initializers are
  // independent.
  words(char* C, long nn, char** S, long mm)
    : n(nn), Chars(C), m(mm), Strings(S) {}
  void del() {free(Chars); free(Strings);}
};
// True for the word delimiters recognized by stringToWords: space, tab,
// newline, carriage return, and the NUL byte.
inline bool isSpace(char c) {
  return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == 0;
}
// Map an entire regular file read-only into memory and return the mapping
// as a _seq<char> (pointer + length).  The mapping is MAP_PRIVATE and the
// descriptor is closed before returning; the caller is responsible for
// munmap'ing the returned pages.  Exits the process on any failure.
_seq<char> mmapStringFromFile(const char *filename) {
  int fd = open(filename, O_RDONLY);
  if (fd == -1) {
    perror("open");
    exit(-1);
  }
  struct stat sb;
  if (fstat(fd, &sb) == -1) {
    perror("fstat");
    exit(-1);
  }
  if (!S_ISREG (sb.st_mode)) {
    perror("not a file\n");
    exit(-1);
  }
  char *p = static_cast<char*>(mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0));
  if (p == MAP_FAILED) {
    perror("mmap");
    exit(-1);
  }
  // the mapping outlives the descriptor, so it is safe to close now
  if (close(fd) == -1) {
    perror("close");
    exit(-1);
  }
  return _seq<char>(p, sb.st_size);
}
// Read an entire file into a freshly allocated, NUL-terminated buffer and
// return it as a _seq<char> of length n (the terminator is not counted).
// Aborts if the file cannot be opened.  The caller owns the buffer.
_seq<char> readStringFromFile(char *fileName) {
  ifstream file (fileName, ios::in | ios::binary | ios::ate);
  if (!file.is_open()) {
    std::cout << "Unable to open file: " << fileName << std::endl;
    abort();
  }
  long end = file.tellg();
  file.seekg (0, ios::beg);
  long n = end - file.tellg();
  char* bytes = newA(char,n+1);
  assert(bytes != NULL && "Malloc failure\n");
  file.read (bytes,n);
  // BUG FIX: the buffer is allocated with room for a terminator but one
  // was never written; consumers (stringToWords) rely on the strings it
  // hands out being NUL-terminated, which failed when the file did not
  // end in whitespace.
  bytes[n] = '\0';
  file.close();
  return _seq<char>(bytes,n);
}
// parallel code for converting a string to words.
// Mutates Str IN PLACE: every delimiter byte is overwritten with NUL, so
// each word becomes a NUL-terminated C string inside the original buffer.
// The returned `words` takes ownership of Str and the pointer array.
words stringToWords(char *Str, long n) {
  // terminate every word by zeroing the delimiters
  {parallel_for (long i=0; i < n; i++)
      if (isSpace(Str[i])) Str[i] = 0; }
  // mark start of words (FL[i] is true where a non-NUL follows a NUL)
  bool *FL = newA(bool,n);
  assert(FL != NULL && "Malloc failure\n");
  FL[0] = Str[0];  // position 0 starts a word iff the first byte is non-NUL
  {parallel_for (long i=1; i < n; i++) FL[i] = Str[i] && !Str[i-1];}
  // offset for each start of word
  _seq<long> Off = sequence::packIndex<long>(FL, n);
  free(FL);
  long m = Off.n;
  long *offsets = Off.A;
  // pointer to each start of word
  char **SA = newA(char*, m);
  assert(SA != NULL && "Malloc failure\n");
  {parallel_for (long j=0; j < m; j++) SA[j] = Str+offsets[j];}
  free(offsets);
  return words(Str,n,SA,m);
}
// Parse a graph from Ligra's ASCII adjacency-list format:
//   line 0: "AdjacencyGraph" (or "WeightedAdjacencyGraph"), then n, m,
//   n vertex offsets, m edge targets (and m weights when WEIGHTED).
// When `mmap` is set the file is memory-mapped and copied (stringToWords
// mutates its input).  For asymmetric graphs the transpose (in-edges) is
// materialized by sorting a (dst,src[,weight]) edge list.
// Returns the graph; ownership of all arrays passes to Uncompressed_Mem.
template <class vertex>
graph<vertex> readGraphFromFile(char* fname, bool isSymmetric, bool mmap) {
  Timer t;
  t.Start();
  words W;
  if (mmap) {
    _seq<char> S = mmapStringFromFile(fname);
    char *bytes = newA(char, S.n);
    assert(bytes != NULL && "Malloc failure\n");
    // Cannot mutate the graph unless we copy.
    parallel_for(size_t i=0; i<S.n; i++) {
      bytes[i] = S.A[i];
    }
    if (munmap(S.A, S.n) == -1) {
      perror("munmap");
      exit(-1);
    }
    S.A = bytes;
    W = stringToWords(S.A, S.n);
  } else {
    _seq<char> S = readStringFromFile(fname);
    W = stringToWords(S.A, S.n);
  }
  // validate the header token
#ifndef WEIGHTED
  if (W.Strings[0] != (string) "AdjacencyGraph") {
#else
  if (W.Strings[0] != (string) "WeightedAdjacencyGraph") {
#endif
    cout << "Bad input file" << endl;
    abort();
  }
  long len = W.m -1;
  long n = atol(W.Strings[1]);
  long m = atol(W.Strings[2]);
  // token count must match the declared n and m exactly
#ifndef WEIGHTED
  if (len != n + m + 2) {
#else
  if (len != n + 2*m + 2) {
#endif
    cout << "Bad input file" << endl;
    abort();
  }
  uintT* offsets = newA(uintT,n);
  assert(offsets != NULL && "Malloc failure\n");
#ifndef WEIGHTED
  uintE* edges = newA(uintE,m);
#else
  // weighted edges are stored interleaved: edges[2i]=target, edges[2i+1]=weight
  intE* edges = newA(intE,2*m);
#endif
  assert(edges != NULL && "Malloc failure\n");
  {parallel_for(long i=0; i < n; i++) offsets[i] = atol(W.Strings[i + 3]);}
  {parallel_for(long i=0; i<m; i++) {
#ifndef WEIGHTED
    edges[i] = atol(W.Strings[i+n+3]);
#else
    edges[2*i] = atol(W.Strings[i+n+3]);
    edges[2*i+1] = atol(W.Strings[i+n+m+3]);
#endif
  }}
  //W.del(); // to deal with performance bug in malloc
  W.del(); //The original code ^ commented this out
  // build out-neighbor views: each vertex points into the shared edge array
  vertex* v = newA(vertex,n);
  assert(v != NULL && "Malloc failure\n");
  {parallel_for (uintT i=0; i < n; i++) {
    uintT o = offsets[i];
    uintT l = ((i == n-1) ? m : offsets[i+1])-offsets[i];
    v[i].setOutDegree(l);
#ifndef WEIGHTED
    v[i].setOutNeighbors(edges+o);
#else
    v[i].setOutNeighbors(edges+2*o);
#endif
  }}
  if(!isSymmetric) {
    // build the transpose so in-neighbors can be iterated
    uintT* tOffsets = newA(uintT,n);
    assert(tOffsets != NULL && "Malloc failure\n");
    {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
    intPair* temp = newA(intPair,m);
#else
    intTriple* temp = newA(intTriple,m);
#endif
    assert(temp != NULL && "Malloc failure\n");
    // emit every edge as (dst, src[, weight])
    {parallel_for(long i=0;i<n;i++){
      uintT o = offsets[i];
      for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
        temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
        temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
      }
    }}
    free(offsets);
    // sort by destination; LOWMEM trades the integer sort for quicksort
#ifndef WEIGHTED
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
    quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
    quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
    tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
    uintE* inEdges = newA(uintE,m);
    inEdges[0] = temp[0].second;
#else
    intE* inEdges = newA(intE,2*m);
    inEdges[0] = temp[0].second.first;
    inEdges[1] = temp[0].second.second;
#endif
    assert(inEdges != NULL && "Malloc failure\n");
    // scatter sorted sources into the in-edge array; a change in
    // destination marks the start of that destination's in-edge run
    {parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
      inEdges[i] = temp[i].second;
#else
      inEdges[2*i] = temp[i].second.first;
      inEdges[2*i+1] = temp[i].second.second;
#endif
      if(temp[i].first != temp[i-1].first) {
        tOffsets[temp[i].first] = i;
      }
    }}
    free(temp);
    //fill in offsets of degree 0 vertices by taking closest non-zero
    //offset to the right
    sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
    {parallel_for(long i=0;i<n;i++){
      uintT o = tOffsets[i];
      uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
      v[i].setInDegree(l);
#ifndef WEIGHTED
      v[i].setInNeighbors(inEdges+o);
#else
      v[i].setInNeighbors(inEdges+2*o);
#endif
    }}
    free(tOffsets);
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
    t.Stop();
    t.PrintTime("Graph reading time(s)", t.Seconds());
    std::cout << "Read directed graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
    return graph<vertex>(v,n,m,mem);
  }
  else {
    // symmetric graph: in-neighbors equal out-neighbors, no transpose needed
    free(offsets);
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
    t.Stop();
    t.PrintTime("Graph reading time(s)", t.Seconds());
    std::cout << "Read undirected graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
    return graph<vertex>(v,n,m,mem);
  }
}
// Read a graph stored in Ligra's three-file binary format:
//   <iFile>.config : vertex count n (ASCII)
//   <iFile>.adj    : edge targets as uints (targets then weights if WEIGHTED)
//   <iFile>.idx    : per-vertex offsets
// For asymmetric graphs the transpose (in-edges) is built in memory, using
// the same sort-by-destination scheme as readGraphFromFile.
template <class vertex>
graph<vertex> readGraphFromBinary(char* iFile, bool isSymmetric) {
  // derive the three companion file names from the base path
  char* config = (char*) ".config";
  char* adj = (char*) ".adj";
  char* idx = (char*) ".idx";
  char configFile[strlen(iFile)+strlen(config)+1];
  char adjFile[strlen(iFile)+strlen(adj)+1];
  char idxFile[strlen(iFile)+strlen(idx)+1];
  *configFile = *adjFile = *idxFile = '\0';
  strcat(configFile,iFile);
  strcat(adjFile,iFile);
  strcat(idxFile,iFile);
  strcat(configFile,config);
  strcat(adjFile,adj);
  strcat(idxFile,idx);
  ifstream in(configFile, ifstream::in);
  long n;
  in >> n;
  in.close();
  ifstream in2(adjFile,ifstream::in | ios::binary); //stored as uints
  in2.seekg(0, ios::end);
  long size = in2.tellg();
  in2.seekg(0);
  // edge count is implied by the .adj file size
#ifdef WEIGHTED
  long m = size/(2*sizeof(uint));
#else
  long m = size/sizeof(uint);
#endif
  char* s = (char *) malloc(size);
  in2.read(s,size);
  in2.close();
  uintE* edges = (uintE*) s;
  ifstream in3(idxFile,ifstream::in | ios::binary); //stored as longs
  in3.seekg(0, ios::end);
  size = in3.tellg();
  in3.seekg(0);
  if(n != size/sizeof(intT)) { cout << "File size wrong\n"; abort(); }
  char* t = (char *) malloc(size);
  in3.read(t,size);
  in3.close();
  uintT* offsets = (uintT*) t;
  vertex* v = newA(vertex,n);
#ifdef WEIGHTED
  // re-interleave: .adj stores all targets then all weights; the in-memory
  // layout is (target, weight) pairs
  intE* edgesAndWeights = newA(intE,2*m);
  {parallel_for(long i=0;i<m;i++) {
    edgesAndWeights[2*i] = edges[i];
    edgesAndWeights[2*i+1] = edges[i+m];
  }}
  //free(edges);
#endif
  // build out-neighbor views into the shared edge array
  {parallel_for(long i=0;i<n;i++) {
    uintT o = offsets[i];
    uintT l = ((i==n-1) ? m : offsets[i+1])-offsets[i];
    v[i].setOutDegree(l);
#ifndef WEIGHTED
    v[i].setOutNeighbors((uintE*)edges+o);
#else
    v[i].setOutNeighbors(edgesAndWeights+2*o);
#endif
  }}
  if(!isSymmetric) {
    // build the transpose so in-neighbors can be iterated
    uintT* tOffsets = newA(uintT,n);
    {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
    intPair* temp = newA(intPair,m);
#else
    intTriple* temp = newA(intTriple,m);
#endif
    // emit every edge as (dst, src[, weight])
    {parallel_for(intT i=0;i<n;i++){
      uintT o = offsets[i];
      for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
        temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
        temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
      }
    }}
    free(offsets);
    // sort by destination; LOWMEM trades the integer sort for quicksort
#ifndef WEIGHTED
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
    quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
    quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
    tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
    uintE* inEdges = newA(uintE,m);
    inEdges[0] = temp[0].second;
#else
    intE* inEdges = newA(intE,2*m);
    inEdges[0] = temp[0].second.first;
    inEdges[1] = temp[0].second.second;
#endif
    // scatter sorted sources; a change in destination starts a new run
    {parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
      inEdges[i] = temp[i].second;
#else
      inEdges[2*i] = temp[i].second.first;
      inEdges[2*i+1] = temp[i].second.second;
#endif
      if(temp[i].first != temp[i-1].first) {
        tOffsets[temp[i].first] = i;
      }
    }}
    free(temp);
    //fill in offsets of degree 0 vertices by taking closest non-zero
    //offset to the right
    sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
    {parallel_for(long i=0;i<n;i++){
      uintT o = tOffsets[i];
      uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
      v[i].setInDegree(l);
#ifndef WEIGHTED
      v[i].setInNeighbors((uintE*)inEdges+o);
#else
      v[i].setInNeighbors((intE*)(inEdges+2*o));
#endif
    }}
    free(tOffsets);
#ifndef WEIGHTED
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
    return graph<vertex>(v,n,m,mem);
#else
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights,inEdges);
    return graph<vertex>(v,n,m,mem);
#endif
  }
  // symmetric graph: no transpose required
  free(offsets);
#ifndef WEIGHTED
  Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
  return graph<vertex>(v,n,m,mem);
#else
  Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights);
  return graph<vertex>(v,n,m,mem);
#endif
}
// Entry point for loading an uncompressed graph: dispatches to the binary
// reader or the adjacency-list text reader.  (`compressed` is accepted for
// interface compatibility but not consulted here.)
template <class vertex>
graph<vertex> readGraph(char* iFile, bool compressed, bool symmetric, bool binary, bool mmap) {
  if (binary)
    return readGraphFromBinary<vertex>(iFile, symmetric);
  return readGraphFromFile<vertex>(iFile, symmetric, mmap);
}
// Load a graph in Ligra's compressed binary layout.  The whole file is
// held in one buffer `s` and the vertex structures point directly into it:
//   [n, m, totalSpace] as longs, then (n+1) out-offsets, n out-degrees,
//   and the compressed out-edge bytes; for asymmetric graphs the in-edge
//   mirror of that layout follows.
// When `mmap` is set the mapping is copied into heap memory first.
template <class vertex>
graph<vertex> readCompressedGraph(char* fname, bool isSymmetric, bool mmap) {
  char* s;
  if (mmap) {
    _seq<char> S = mmapStringFromFile(fname);
    // Cannot mutate graph unless we copy.
    char *bytes = newA(char, S.n);
    parallel_for(size_t i=0; i<S.n; i++) {
      bytes[i] = S.A[i];
    }
    if (munmap(S.A, S.n) == -1) {
      perror("munmap");
      exit(-1);
    }
    s = bytes;
  } else {
    ifstream in(fname,ifstream::in |ios::binary);
    in.seekg(0,ios::end);
    long size = in.tellg();
    in.seekg(0);
    cout << "size = " << size << endl;
    s = (char*) malloc(size);
    in.read(s,size);
    in.close();
  }
  // header: three longs
  long* sizes = (long*) s;
  long n = sizes[0], m = sizes[1], totalSpace = sizes[2];
  cout << "n = "<<n<<" m = "<<m<<" totalSpace = "<<totalSpace<<endl;
  cout << "reading file..."<<endl;
  // carve typed views out of the single buffer by advancing `skip`
  uintT* offsets = (uintT*) (s+3*sizeof(long));
  long skip = 3*sizeof(long) + (n+1)*sizeof(intT);
  uintE* Degrees = (uintE*) (s+skip);
  skip+= n*sizeof(intE);
  uchar* edges = (uchar*)(s+skip);
  uintT* inOffsets;
  uchar* inEdges;
  uintE* inDegrees;
  if(!isSymmetric){
    // the in-edge section mirrors the out-edge layout after totalSpace bytes
    skip += totalSpace;
    uchar* inData = (uchar*)(s + skip);
    sizes = (long*) inData;
    long inTotalSpace = sizes[0];
    cout << "inTotalSpace = "<<inTotalSpace<<endl;
    skip += sizeof(long);
    inOffsets = (uintT*) (s + skip);
    skip += (n+1)*sizeof(uintT);
    inDegrees = (uintE*)(s+skip);
    skip += n*sizeof(uintE);
    inEdges = (uchar*)(s + skip);
  } else {
    // symmetric graph: in-structures alias the out-structures
    inOffsets = offsets;
    inEdges = edges;
    inDegrees = Degrees;
  }
  vertex *V = newA(vertex,n);
  parallel_for(long i=0;i<n;i++) {
    long o = offsets[i];
    uintT d = Degrees[i];
    V[i].setOutDegree(d);
    V[i].setOutNeighbors(edges+o);
  }
  // only asymmetric vertex types carry separate in-neighbor fields
  if(sizeof(vertex) == sizeof(compressedAsymmetricVertex)){
    parallel_for(long i=0;i<n;i++) {
      long o = inOffsets[i];
      uintT d = inDegrees[i];
      V[i].setInDegree(d);
      V[i].setInNeighbors(inEdges+o);
    }
  }
  cout << "creating graph..."<<endl;
  // Compressed_Mem takes ownership of both V and the backing buffer s
  Compressed_Mem<vertex>* mem = new Compressed_Mem<vertex>(V, s);
  graph<vertex> G(V,n,m,mem);
  return G;
}
/* prefix sum used by the preprocess function defined below */
// Block-parallel exclusive prefix sum over `degrees`.  Returns a vector of
// degrees.size()+1 entries: entry i is the sum of degrees[0..i-1] and the
// final entry is the grand total.
// BUG FIX: the parameter declaration had been corrupted by an encoding
// round-trip ("°rees" — the HTML entity for "&deg" swallowed the '&' of
// "&degrees"); restored to `const pvector<uintT> &degrees`, which is what
// the body's uses of `degrees` require.
static pvector<uintT> ParallelPrefixSum (const pvector<uintT> &degrees) {
  const size_t block_size = 1<<20;
  const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
  // Phase 1: per-block partial sums.
  pvector<uintT> local_sums(num_blocks);
  #pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT lsum = 0;
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++)
      lsum += degrees[i];
    local_sums[block] = lsum;
  }
  // Phase 2: serial scan over the (few) block totals.
  pvector<uintT> bulk_prefix(num_blocks+1);
  uintT total = 0;
  for (size_t block=0; block < num_blocks; block++) {
    bulk_prefix[block] = total;
    total += local_sums[block];
  }
  bulk_prefix[num_blocks] = total;
  // Phase 3: expand each block in parallel from its starting offset.
  pvector<uintT> prefix(degrees.size() + 1);
  #pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT local_total = bulk_prefix[block];
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++) {
      prefix[i] = local_total;
      local_total += degrees[i];
    }
  }
  prefix[degrees.size()] = bulk_prefix[num_blocks];
  return prefix;
}
/*
  Preprocess a graph based on outdegrees or indegrees
  PageRank Optimizations for directed graphs -
  1) We do not create a new outNeighbors list (because it pull-only)
  2) We only create new out-degrees because PR uses it during computation
  NOTE: This implements a cheaper variant of frequency based clustering
  that does not require sorting

  Produces a relabeled copy of GA in which above-average-degree "hub"
  vertices are packed at the front of the ID space; the old->new mapping
  is written into new_ids (which callers must pass in filled with
  UINT_E_MAX).

  FIXES relative to the previous revision:
  - Removed two hard-coded `assert(omp_get_max_threads() == 56)` calls,
    which aborted the program on any machine not configured with exactly
    56 OpenMP threads.
  - Removed the dead degree_id_pairs/hubCount machinery: hubCount was
    initialized to 0 and never modified, so the loop consuming it could
    never execute and the pair vector was never populated.
*/
template <class vertex>
graph<vertex> preprocessGraph(graph<vertex> GA, bool isSym, bool useOutdeg,
                              pvector<uintE>& new_ids, bool isPageRank = false,
                              bool isDenseWrite = false)
{
  Timer t;
  t.Start();
  auto numVertices = GA.n;
  auto numEdges = GA.m;
  vertex *origG = GA.V;
  // NOTE(review): divides by numVertices — assumes a non-empty graph.
  uintT avgDegree = numEdges / numVertices;
  if (!isSym) {
    /* directed graph */
    /* STEP I - identify number and position of hubs in each threads partition*/
    const int PADDING = 64 / sizeof(uintE);  // one cache line per thread
    uintE* localOffsets = new uintE[omp_get_max_threads() * PADDING]();
    uintE partitionSz = numVertices / omp_get_max_threads();
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      uintE startID = partitionSz * tid;
      uintE stopID = partitionSz * (tid + 1);
      if (tid == omp_get_max_threads() - 1) {
        stopID = numVertices;  // last thread absorbs the remainder
      }
      for (uintE n = startID; n < stopID; ++n) {
        vertex vtx = origG[n];
        if (useOutdeg) {
          if (vtx.getOutDegree() > avgDegree) {
            ++localOffsets[tid * PADDING];
            new_ids[n] = 1;  // mark as hub; real ID assigned in Step II
          }
        }
        else {
          if (vtx.getInDegree() > avgDegree) {
            ++localOffsets[tid * PADDING];
            new_ids[n] = 1;
          }
        }
      }
    }
    // serial exclusive scan of per-thread hub counts
    uintE sum {0};
    for (int tid = 0; tid < omp_get_max_threads(); ++tid) {
      auto origCount = localOffsets[tid * PADDING];
      localOffsets[tid * PADDING] = sum;
      sum += origCount;
    }
    /* Step II - assign a remap for the hub vertices first */
    #pragma omp parallel
    {
      uintE localCtr {0};
      int tid = omp_get_thread_num();
      uintE startID = partitionSz * tid;
      uintE stopID = partitionSz * (tid + 1);
      if (tid == omp_get_max_threads() - 1) {
        stopID = numVertices;
      }
      for (uintE n = startID; n < stopID; ++n) {
        if (new_ids[n] != UINT_E_MAX) {
          new_ids[n] = localOffsets[tid * PADDING] + localCtr;
          ++localCtr;
        }
      }
    }
    delete[] localOffsets;
    /* Step III - assigning a remap for (easy) non hub vertices */
    auto numHubs = sum;
    SlidingQueue<uintE> queue(numHubs);
    #pragma omp parallel
    {
      // (portability fix: no longer asserts a 56-thread configuration)
      QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads());
      #pragma omp for
      for (uintE n = numHubs; n < numVertices; ++n) {
        if (new_ids[n] == UINT_E_MAX) {
          // This steps preserves the ordering of the original graph (as much as possible)
          new_ids[n] = n;
        }
        else {
          uintE remappedTo = new_ids[n];
          if (new_ids[remappedTo] == UINT_E_MAX) {
            // safe to swap Ids because the original vertex is a non-hub
            new_ids[remappedTo] = n;
          }
          else {
            // Cannot swap ids because original vertex was a hub (swapping
            // would disturb sorted ordering of hubs - not allowed)
            lqueue.push_back(n);
          }
        }
      }
      lqueue.flush();
    }
    queue.slide_window(); //the queue keeps a list of vertices where a simple swap of locations is not possible
    /* Step IV - assigning remaps for remaining non hubs */
    uintE unassignedCtr {0};
    auto q_iter = queue.begin();
    #pragma omp parallel for
    for (uintE n = 0; n < numHubs; ++n) {
      if (new_ids[n] == UINT_E_MAX) {
        uintE u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
        new_ids[n] = u;
      }
    }
    /* Step V - generate degree list for new graph */
    pvector<uintT> degrees(numVertices);
    pvector<uintT> inv_degrees(numVertices);
    #pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      auto newID = new_ids[v];
      if (useOutdeg) {
        vertex vtx = origG[v];
        degrees[newID] = vtx.getOutDegree();
        inv_degrees[newID] = vtx.getInDegree();
      }
      else {
        vertex vtx = origG[v];
        degrees[newID] = vtx.getInDegree();
        inv_degrees[newID] = vtx.getOutDegree();
      }
    }
    /* Step VI - make a new vertex list for the new graph */
    pvector<uintT> offsets = ParallelPrefixSum(degrees);
    pvector<uintT> inv_offsets = ParallelPrefixSum(inv_degrees);
    //clearing space from degree lists
    pvector<uintT>().swap(degrees);
    pvector<uintT>().swap(inv_degrees);
#ifndef WEIGHTED
    uintE* outEdges = newA(uintE, numEdges);
    uintE* inEdges = newA(uintE, numEdges);
#else
    intE* outEdges = newA(intE, 2 * numEdges);
    intE* inEdges = newA(intE, 2 * numEdges);
#endif
    vertex* newV = newA(vertex, numVertices);
    #pragma omp parallel for schedule (dynamic, 1024)
    for (uintE v = 0; v < numVertices; ++v) {
      /* note that vertex IDs u and v belong to the space of original vertex IDs */
      if (!isPageRank) {
        //copy out-neighbors
        auto newID = new_ids[v];
        newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
        if (useOutdeg)
          newV[newID].setOutNeighbors(outEdges + offsets[newID]);
        else
          newV[newID].setOutNeighbors(outEdges + inv_offsets[newID]);
#else
        if (useOutdeg)
          newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
        else
          newV[newID].setOutNeighbors(outEdges + 2 * inv_offsets[newID]);
#endif
        for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
          auto origNgh = origG[v].getOutNeighbor(u);
          newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
          newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
        }
        if (!isDenseWrite) {
          /* for dense-write pushonly apps we dont need in-neighbors */
          //copy in-neighbors
          newV[newID].setInDegree(origG[v].getInDegree());
#ifndef WEIGHTED
          if (useOutdeg)
            newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
          else
            newV[newID].setInNeighbors(inEdges + offsets[newID]);
#else
          if (useOutdeg)
            newV[newID].setInNeighbors(inEdges + 2 * inv_offsets[newID]);
          else
            newV[newID].setInNeighbors(inEdges + 2 * offsets[newID]);
#endif
          for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
            auto origNgh = origG[v].getInNeighbor(u);
            newV[newID].setInNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
            newV[newID].setInWeight(u, origG[v].getInWeight(u));
#endif
          }
        }
      }
      else {
        /* PageRank - no need to apply weighted conditionals */
        //copy in-neighbors
        auto newID = new_ids[v];
        newV[newID].setInDegree(origG[v].getInDegree());
        if (useOutdeg)
          newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
        else
          newV[newID].setInNeighbors(inEdges + offsets[newID]);
        for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
          auto origNgh = origG[v].getInNeighbor(u);
          newV[newID].setInNeighbor(u, new_ids[origNgh]);
        }
        //only set out-degrees
        newV[newID].setOutDegree(origG[v].getOutDegree());
      }
    }
    /* Step VII - make the new graph */
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges,inEdges);
    t.Stop();
    t.PrintTime("HubCluster Time", t.Seconds());
    return graph<vertex>(newV,numVertices,numEdges,mem);
  }
  else {
    /* undirected graph */
    /* STEP I - collect degrees of all vertices */
    const int PADDING = 64 / sizeof(uintE);
    uintE* localOffsets = new uintE[omp_get_max_threads() * PADDING]();
    uintE partitionSz = numVertices / omp_get_max_threads();
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      uintE startID = partitionSz * tid;
      uintE stopID = partitionSz * (tid + 1);
      if (tid == omp_get_max_threads() - 1) {
        stopID = numVertices;
      }
      for (uintE n = startID; n < stopID; ++n) {
        vertex vtx = origG[n];
        if (vtx.getOutDegree() > avgDegree) {
          ++localOffsets[tid * PADDING];
          new_ids[n] = 1;
        }
      }
    }
    uintE sum {0};
    for (int tid = 0; tid < omp_get_max_threads(); ++tid) {
      auto origCount = localOffsets[tid * PADDING];
      localOffsets[tid * PADDING] = sum;
      sum += origCount;
    }
    /* Step II - assign a remap for the hub vertices */
    #pragma omp parallel
    {
      uintE localCtr {0};
      int tid = omp_get_thread_num();
      uintE startID = partitionSz * tid;
      uintE stopID = partitionSz * (tid + 1);
      if (tid == omp_get_max_threads() - 1) {
        stopID = numVertices;
      }
      for (uintE n = startID; n < stopID; ++n) {
        if (new_ids[n] != UINT_E_MAX) {
          new_ids[n] = localOffsets[tid * PADDING] + localCtr;
          ++localCtr;
        }
      }
    }
    delete[] localOffsets;
    // (removed: a loop over hubCount entries of degree_id_pairs;
    //  hubCount was always zero, so the loop could never run)
    /* Step III - assigning remap for (easy) non hub vertices */
    auto numHubs = sum;
    SlidingQueue<uintE> queue(numHubs);
    #pragma omp parallel
    {
      // (portability fix: no longer asserts a 56-thread configuration)
      QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads());
      #pragma omp for
      for (uintE n = numHubs; n < numVertices; ++n) {
        if (new_ids[n] == UINT_E_MAX) {
          new_ids[n] = n;
        }
        else {
          uintE remappedTo = new_ids[n];
          if (new_ids[remappedTo] == UINT_E_MAX) {
            new_ids[remappedTo] = n;
          }
          else {
            lqueue.push_back(n);
          }
        }
      }
      lqueue.flush();
    }
    queue.slide_window();
    /* Step IV - assigning remaps for remaining non hubs */
    uintE unassignedCtr {0};
    auto q_iter = queue.begin();
    #pragma omp parallel for
    for (uintE n = 0; n < numHubs; ++n) {
      if (new_ids[n] == UINT_E_MAX) {
        uintE u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
        new_ids[n] = u;
      }
    }
    /* Step V - generate degree list for new graph */
    pvector<uintT> degrees(numVertices);
    #pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      auto newID = new_ids[v];
      vertex vtx = origG[v];
      degrees[newID] = vtx.getOutDegree();
    }
    /* Step VI - make a new vertex list and edge list for the new graph */
    pvector<uintT> offsets = ParallelPrefixSum(degrees);
    //clearing space from degrees
    pvector<uintT>().swap(degrees);
#ifndef WEIGHTED
    uintE* outEdges = newA(uintE, numEdges);
#else
    intE* outEdges = newA(intE, 2 * numEdges);
#endif
    vertex* newV = newA(vertex, numVertices);
    #pragma omp parallel for schedule (dynamic, 1024)
    for (uintE v = 0; v < numVertices; ++v) {
      /* note that vertex IDs u and v belong to the space of original vertex IDs */
      //copy neighbors
      auto newID = new_ids[v];
      newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
      newV[newID].setOutNeighbors(outEdges + offsets[newID]);
#else
      newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
#endif
      for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
        auto origNgh = origG[v].getOutNeighbor(u);
        newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
        newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
      }
    }
    /* Step VII - make the new graph */
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges);
    t.Stop();
    t.PrintTime("HubCluster Time", t.Seconds());
    return graph<vertex>(newV,numVertices,numEdges,mem);
  }
}
|
sgbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrf.c, normal z -> s, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbtrf
*
* Computes an LU factorization of a real m-by-n band matrix A
* using partial pivoting with row interchanges.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. n >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_sgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
******************************************************************************/
int plasma_sgbtrf(int m, int n, int kl, int ku,
                  float *pAB, int ldab, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }

    // quick return (the original left this comment with no code behind it)
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaRealFloat, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier (used by the panel factorization).
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t AB;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    // Since sgetrf is used on the panel and pivots back within the panel,
    // pivoting may fill the last tile of the panel, so an extra nb rows of
    // storage are needed at the bottom.
    int lm = (tku+tkl+1)*nb;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaRealFloat, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, m, n, kl, ku, &AB);
    if (retval != PlasmaSuccess) {
        // error message now names the function that actually failed
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }

    // Initialize sequence (return value was previously ignored).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize request (return value was previously ignored).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB);
        return retval;
    }

    // A single parallel region lets the three asynchronous stages pipeline
    // through task dependences instead of paying a full thread-team
    // fork/join barrier between layout translation, factorization, and
    // back-translation (the original opened three separate regions).
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_spb2desc(pAB, ldab, AB, &sequence, &request);

        // Call the tile async function.
        plasma_omp_sgbtrf(AB, ipiv, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2pb(AB, pAB, ldab, &sequence, &request);
    }

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* Computes an LU factorization of a real m-by-n band matrix A
* using partial pivoting with row interchanges.
* Non-blocking tile version of plasma_sgbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
******************************************************************************/
void plasma_omp_sgbtrf(plasma_desc_t AB, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        if (sequence != NULL && request != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Validate the sequence and request handles before anything else:
    // plasma_request_fail() presumably writes through both pointers
    // (it records the failure status), so the original ordering — which
    // called it with a NULL sequence/request — would dereference NULL.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Call the parallel function.
    plasma_psgbtrf(AB, ipiv, sequence, request);
}
|
Dijkstra.c | // OpenMP example program: Dijkstra shortest-path finder in a
// bidirectional graph
// serves as a tutorial to OpenMP; see notes in comments at the end of
// the file
// each thread handles one chunk of vertices
// usage: dijkstra
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>     // omp_get_thread_num(), omp_get_num_threads()
#endif
// "infinity": large enough to dominate any real path length, yet small
// enough that LARGEINT + LARGEINT still fits in a signed 32-bit int
// (updateohd adds two of these).  The original `2<<30-1` parsed as
// 2 << (30-1) because `-` binds tighter than `<<`, and the apparently
// intended 2^31-1 would overflow on addition.
#define LARGEINT ((1 << 30) - 1)
#define NV 6         // number of vertices
// global variables, all shared by all threads by default
int ohd[NV][NV],     // 1-hop distances between vertices; 0 on the diagonal,
                     // LARGEINT where no edge exists
    mind[NV],        // min distances found so far
    notdone[NV],     // vertices not checked yet
    nth,             // number of threads
    chunk,           // number of vertices handled by each thread
    md,              // current min over all threads
    mv;              // vertex which achieves that min
// Builds the example graph: self-distances 0, absent edges "infinite",
// then the hard-coded undirected edge weights; finally seeds the
// tentative distances (mind) and the pending flags (notdone) from
// vertex 0's row.  The command-line arguments are accepted but unused.
void init(int ac, char **av)
{
   int i, j;
   // start from a fully disconnected graph
   for (i = 0; i < NV; i++) {
      for (j = 0; j < NV; j++) {
         ohd[i][j] = (i == j) ? 0 : LARGEINT;
      }
   }
   // undirected edges of the example graph
   ohd[0][1] = ohd[1][0] = 40;
   ohd[0][2] = ohd[2][0] = 15;
   ohd[1][2] = ohd[2][1] = 20;
   ohd[1][3] = ohd[3][1] = 10;
   ohd[1][4] = ohd[4][1] = 25;
   ohd[2][3] = ohd[3][2] = 100;
   ohd[1][5] = ohd[5][1] = 6;
   ohd[4][5] = ohd[5][4] = 8;
   // vertex 0 is the source: every other vertex starts pending, with its
   // 1-hop distance from 0 as the initial tentative distance
   for (i = 1; i < NV; i++) {
      notdone[i] = 1;
      mind[i] = ohd[0][i];
   }
}
// Finds the not-yet-finalized vertex with the smallest tentative distance
// from vertex 0, among indices s through e (inclusive).
// On return *d holds that smallest distance and *v the vertex attaining
// it; if no pending vertex exists in [s,e], *d stays LARGEINT and *v
// is -1 (previously *v was left uninitialized in that case).
void findmymin(int s, int e, int *d, int *v)
{  int i;
   *d = LARGEINT;
   *v = -1;
   for (i = s; i <= e; i++)
      if (notdone[i] && mind[i] < *d)  {
         // record the tentative distance itself; the original stored
         // ohd[0][i] (the direct 1-hop distance), which is stale as soon
         // as mind[i] has been improved through an intermediate vertex
         *d = mind[i];
         *v = i;
      }
}
// For each vertex idx in [s,e], check whether routing through the newly
// finalized vertex mv yields a shorter path from vertex 0, and if so
// record the improved tentative distance in mind[idx].
void updateohd(int s, int e)
{  int idx;
   for (idx = s; idx <= e; idx++)  {
      int through_mv = mind[mv] + ohd[mv][idx];
      if (through_mv < mind[idx])
         mind[idx] = through_mv;
   }
}
// Parallel Dijkstra driver: each thread owns a contiguous chunk of
// vertices; each of the NV steps finds the globally closest pending
// vertex, finalizes it, and relaxes the tentative distances through it.
void dowork()
{
#pragma omp parallel // Note 1
   {  int startv,endv, // start, end vertices for this thread
          step,        // whole procedure goes NV steps
          mymd,        // min value found by this thread
          mymv,        // vertex which attains that value
          me = omp_get_thread_num(); // my thread number
#pragma omp single // Note 2
      {  nth = omp_get_num_threads();  chunk = NV/nth;
         printf("there are %d threads\n",nth);  }
      // Note 3
      startv = me * chunk;
      endv = startv + chunk - 1;
      // the last thread also takes the NV % nth leftover vertices, which
      // the plain chunking above would otherwise silently drop
      if (me == nth - 1) endv = NV - 1;
      for (step = 0; step < NV; step++)  {
         // find closest vertex to 0 among notdone; each thread finds
         // closest in its group, then we find overall closest
#pragma omp single
         {  md = LARGEINT;  mv = 0;  }
         findmymin(startv,endv,&mymd,&mymv);
         // update overall min if mine is smaller
#pragma omp critical // Note 4
         {  if (mymd < md)
            {  md = mymd;  mv = mymv;  }
         }
         // every thread must have contributed its candidate before mv is
         // read below: "single" carries an implicit barrier only at its
         // END, not on entry, so without this explicit barrier a fast
         // thread could finalize a stale mv
#pragma omp barrier
         // mark new vertex as done
#pragma omp single
         {  notdone[mv] = 0;  }
         // now relax my section of mind through mv
         updateohd(startv,endv);
#pragma omp barrier
      }
   }
}
// Entry point: build the example graph, run the parallel solver, then
// print the final shortest distances from vertex 0.
int main(int argc, char **argv)
{
   init(argc, argv);
   dowork();
   // back to single thread now
   printf("minimum distances:\n");
   for (int v = 1; v < NV; v++) {
      printf("%d\n", mind[v]);
   }
   return 0;
}
// tutorial notes:
// 1. OpenMP works via a preprocessor, which translates pragmas to
// threads calls. Note that the sharp sign ('#') must be the first
// character in the line, other than blanks.
//
// The "parallel" clause says, "Have each thread do this block"
// (enclosed by braces). Code not in a block with a "parallel"
// pragma is done only by the master thread.
// 2. The "single" clause says, "Have only one thread (whichever hits
// this line first) execute the following block."
// In this case, we are calling the OMP function
// omp_get_num_threads(), which of course returns the number of
// threads. Since we assign the return value to the global variable
// nth, only one thread needs to do this, so we use "single". And
// though there would be no harm (other than a delay) if all
// threads did this, in some applications we would need to limit an
// action to just one thread.
// 3. The "barrier" clause does the standard barrier operation. Note
// carefully that there are also implicit barriers following blocks
// to which various OpenMP pragmas apply, such as "for" and
// "single". One can override those implicit barriers by using the
// "nowait" clause. On platforms with nonsequential memory
// consistency, you can also use the "flush" directive to force a
// memory update.
// 4. The "critical" clause sets up a critical section, with invisible
// lock/unlock operations. Note carefully that the clause may be
// followed by an optional name, which is crucial in some
// applications. All critical sections with the same name
// are guarded by the same (invisible) locks. Those with
// no name are also guarded by the same locks, so the programmer
// could really lose parallelism if he/she were not aware of this.
// Certain very specialized one-statement critical sections can be
// handled more simply and efficiently using the "atomic"
// directive, e.g.
// #pragma omp atomic
// y += x;
// Note that the statement can NOT be a block.
|
custom_functions.h | //
// Project Name: Kratos
// Last Modified by: $Author: G.Casas (gcasas@cimmne.upc.edu) $
// Date: $Date: 2011-6-13 08:56:42 $
// Revision: $Revision: 1.5 $
//
//
//README::::look to the key word "VERSION" if you want to find all the points where you have to change something so that you can pass from a kdtree to a bin data search structure;
#if !defined(KRATOS_CUSTOM_FUNCTIONS)
#define KRATOS_CUSTOM_FUNCTIONS
// /* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
// System includes
#include <vector>
// Project includes
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_nodal_neighbours_process.h"
//Database includes
#include "custom_utilities/search/discrete_particle_configure.h"
#include "includes/define.h"
#include "custom_elements/discrete_element.h"
#include "custom_elements/swimming_particle.h"
#include "custom_utilities/AuxiliaryFunctions.h"
#include "custom_elements/spheric_particle.h"
#include "swimming_DEM_application.h"
#include "utilities/geometry_utilities.h"
namespace Kratos
{
template <std::size_t TDim>
class CustomFunctionsCalculator
{
public:
typedef ModelPart::ElementsContainerType::iterator ElementIterator;
typedef ModelPart::NodesContainerType::iterator NodeIterator;
typedef ModelPart::NodesContainerType NodesArrayType;
KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator);
/// Default constructor.  The mFirst* flags start true (first-call state);
/// the remaining bookkeeping flags start false.
CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){}
/// Destructor.
virtual ~CustomFunctionsCalculator(){}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Computes the nodal PRESSURE_GRADIENT as a volume-weighted (lumped)
// projection: each element's constant gradient of the linear pressure
// interpolation is accumulated to its nodes, then divided by NODAL_AREA.
void CalculatePressureGradient(ModelPart& r_model_part)
{
// reset the nodal accumulators before the elemental loop
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
noalias(inode->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3);
}
array_1d <double, 3> grad = ZeroVector(3); // its dimension is always 3
array_1d <double, TDim + 1 > elemental_pressures;
array_1d <double, TDim + 1 > N; // shape functions vector
BoundedMatrix<double, TDim + 1, TDim> DN_DX;
for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
// computing the shape function derivatives
Geometry<Node<3> >& geom = ielem->GetGeometry();
double Volume;
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
// gather this element's nodal pressures
for (unsigned int i = 0; i < TDim + 1; ++i){
elemental_pressures[i] = geom[i].FastGetSolutionStepValue(PRESSURE);
}
// constant gradient of the linear interpolation: DN_DX^T * p
array_1d <double, TDim> grad_aux = prod(trans(DN_DX), elemental_pressures); // its dimension may be 2
for (unsigned int i = 0; i < TDim; ++i){
grad[i] = grad_aux[i];
}
// lumped weighting: each of the TDim+1 nodes gets an equal volume share
double nodal_area = Volume / static_cast<double>(TDim + 1);
grad *= nodal_area;
for (unsigned int i = 0; i < TDim + 1; ++i){
geom[i].FastGetSolutionStepValue(PRESSURE_GRADIENT) += grad;
}
}
// divide by the assembled nodal areas to complete the average
// NOTE(review): assumes NODAL_AREA was computed beforehand — confirm
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
inode->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= inode->FastGetSolutionStepValue(NODAL_AREA);
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assesses stationarity of the pressure field: compares the maximum
// nodal pressure change rate since the last call, non-dimensionalized by
// a characteristic time and pressure variation, against `tol`.
// Returns false on the first call (which only records the field).
bool AssessStationarity(ModelPart& r_model_part, const double& tol)
{
    if (!mPressuresFilled){
        PerformFirstStepComputations(r_model_part);
        return(false);
    }

    else {
        double max_pressure_change_rate = 0.0; // measure of stationarity
        double mean_celerity = 0.0;            // used to adimensionalize the time step

        // refill mPressures while accumulating the mean velocity modulus and
        // the maximum nodal pressure change since the last measurement
        unsigned int i = 0;
        for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
            const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY);
            mean_celerity += SWIMMING_MODULUS_3(velocity);
            const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE);
            double& old_pressure = mPressures[i];
            const double delta_p = std::abs(new_pressure - old_pressure);
            max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate);
            old_pressure = new_pressure;
            ++i;
        }

        // guard against an empty model part: the original divided by i
        // unconditionally, yielding NaN that silently propagated below
        if (i > 0){
            mean_celerity /= i;
        }

        // NOTE(review): mLastMeasurementTime is not refreshed in this method;
        // presumably PerformFirstStepComputations sets it — confirm it is
        // updated between calls, otherwise delta_t grows monotonically.
        const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime;

        if (delta_t > 0.0){
            max_pressure_change_rate /= delta_t;

            // calculating coefficients for adimensionalization of the pressure change rate
            const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such
            const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length;
            const double pressure_spatial_variation = GetRangeWithinVector(mPressures);
            // NOTE(review): mLastPressureVariation is overwritten before the
            // average below is taken, so the average degenerates to the current
            // value alone.  Reordering would read a possibly-uninitialized
            // member on the first pass, so the original order is kept — confirm
            // the intended semantics and initialize the member if reordering.
            mLastPressureVariation = pressure_spatial_variation;
            const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation);

            if (std::abs(characteristic_pressure_variation) < std::numeric_limits<double>::epsilon() || std::abs(reciprocal_of_characteristic_time) < std::numeric_limits<double>::epsilon()){ // unlikely
                std::cout << "Uniform problem: stationarity check being performed with dimensional values...! " << "\n";
                if (max_pressure_change_rate <= tol){ // go with the absolute value
                    return true;
                }
            }

            max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ;
        }
        else {
            KRATOS_ERROR << "Trying to calculate pressure variations between two coincident time steps! (null time variation since last recorded time)" << std::endl;
        }

        std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
        std::cout << "The stationarity condition tolerance is " << "\n";
        KRATOS_INFO("SwimmingDEM") << tol << std::endl;
        std::cout << "The stationarity residual is now " << "\n";
        KRATOS_INFO("SwimmingDEM") << max_pressure_change_rate << std::endl;
        std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";

        return max_pressure_change_rate <= tol;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the total volume of the local fluid mesh: the sum of the
// elemental volumes, accumulated in parallel over per-thread partitions.
double CalculateDomainVolume(ModelPart& r_fluid_model_part)
{
    const int n_threads = ParallelUtilities::GetNumThreads();
    OpenMPUtils::CreatePartition(n_threads, r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    double total_volume = 0.0;

    #pragma omp parallel for reduction(+ : total_volume)
    for (int k = 0; k < n_threads; ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            total_volume += CalculateElementalVolume(it->GetGeometry());
        }
    }

    return total_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Sums HYDRODYNAMIC_FORCE over all local DEM elements into `force`.
// NOTE(review): only node 0 of each element is read — presumably DEM
// particles are single-node elements; confirm for other element types.
void CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force)
{
OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
// one accumulator per thread avoids write contention in the parallel loop
std::vector<array_1d <double, 3> > added_force_vect;
added_force_vect.resize(ParallelUtilities::GetNumThreads());
for (unsigned int k = 0; k < added_force_vect.size(); ++k){
added_force_vect[k] = ZeroVector(3);
}
#pragma omp parallel for
for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_dem_model_part, k); it != GetElementPartitionEnd(r_dem_model_part, k); ++it){
Geometry< Node<3> >& geom = it->GetGeometry();
array_1d <double, 3> element_force;
// elements whose nodes do not carry the variable contribute nothing
if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){
element_force = geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE);
}
else {
element_force = ZeroVector(3);
}
added_force_vect[k] += element_force;
}
}
// serial reduction of the per-thread partial sums
force = added_force_vect[0];
for (unsigned int k = 1; k < added_force_vect.size(); ++k){
force += added_force_vect[k];
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Integrates the instantaneous (HYDRODYNAMIC_REACTION) and time-averaged
// (MEAN_HYDRODYNAMIC_REACTION) hydrodynamic reaction per unit fluid mass
// over the local fluid mesh, returning both totals through the output
// parameters.  Assumes linear elements.
void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force)
{
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    // per-thread accumulators avoid write contention in the parallel loop
    std::vector<array_1d <double, 3> > added_force_vect;
    added_force_vect.resize(ParallelUtilities::GetNumThreads());
    std::vector<array_1d <double, 3> > added_mean_force_vect;
    added_mean_force_vect.resize(ParallelUtilities::GetNumThreads());
    for (unsigned int k = 0; k < added_force_vect.size(); ++k){
        added_force_vect[k] = ZeroVector(3);
        added_mean_force_vect[k] = ZeroVector(3);
    }

    #pragma omp parallel for
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            array_1d <double, 3> element_force;
            array_1d <double, 3> element_mean_force;
            // NOTE(review): only node 0 is checked — assumes the variables are
            // declared uniformly across the model part; confirm
            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_force = ZeroVector(3);
            }
            if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_mean_force = ZeroVector(3);
            }
            added_force_vect[k] += element_force;
            added_mean_force_vect[k] += element_mean_force;
        }
    }

    // serial reduction of the per-thread partial sums
    instantaneous_force = added_force_vect[0];
    // bug fix: the mean-force reduction must start from its own accumulator;
    // the original seeded it with added_force_vect[0], mixing the
    // instantaneous total into the mean total
    mean_force = added_mean_force_vect[0];
    for (unsigned int k = 1; k < added_force_vect.size(); ++k){
        instantaneous_force += added_force_vect[k];
        mean_force += added_mean_force_vect[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Integrates FLUID_FRACTION over the local mesh to obtain the volume
// actually occupied by fluid; elements whose nodes do not carry
// FLUID_FRACTION contribute their full geometric volume instead.
// Assumes linear elements.
double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part)
{
OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
double added_fluid_volume = 0.0;
// one partition per thread; the reduction combines the per-thread sums
#pragma omp parallel for reduction(+ : added_fluid_volume)
for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
Geometry< Node<3> >& geom = it->GetGeometry();
double element_volume;
double element_fluid_volume;
// NOTE(review): only node 0 is checked — assumes FLUID_FRACTION is
// declared uniformly across the model part; confirm
if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
}
else {
element_fluid_volume = CalculateElementalVolume(geom);
}
added_fluid_volume += element_fluid_volume;
}
}
return added_fluid_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Determinant of a square ublas matrix via LU factorization with partial
// pivoting: det(A) = (-1)^(number of row swaps) * prod(diag(U)).
// Returns 0.0 when lu_factorize reports a singular matrix.
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
double det = 1.0;
// work on a copy: lu_factorize overwrites its argument with the factors
matrix_T mLu(mat_r() );
boost::numeric::ublas::permutation_matrix<std::size_t> pivots(mat_r().size1() );
// lu_factorize returns non-zero iff the matrix is singular
int is_singular = lu_factorize(mLu, pivots);
if (!is_singular)
{
for (std::size_t i=0; i < pivots.size(); ++i)
{
// each executed row interchange flips the determinant's sign
if (pivots(i) != i)
det *= -1.0;
// accumulate the diagonal of U stored in the factorized matrix
det *= mLu(i,i);
}
}
else
det = 0.0;
return det;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the inverse of the square matrix m.
// Sizes 1x1..3x3 use closed-form cofactor formulas; larger matrices are
// inverted recursively by blockwise (Schur-complement) inversion using
// the project helpers Chop/Unchop/CalcDeterminant (defined elsewhere).
// Precondition (asserted): m is square and non-singular.
const DenseMatrix<double> Inverse(
const DenseMatrix<double>& m)
{
assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices");
switch(m.size1())
{
case 1:
{
assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
assert(m(0,0) != 0.0 && "Cannot take the inverse of matrix [0]");
DenseMatrix<double> n(1,1);
n(0,0) = 1.0 / determinant;
return n;
}
case 2:
{
// closed form: inv([a b; c d]) = [d -b; -c a] / det
assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(1,0);
const double d = m(1,1);
DenseMatrix<double> n(2,2);
n(0,0) = d / determinant;
n(0,1) = -b / determinant;
n(1,0) = -c / determinant;
n(1,1) = a / determinant;
return n;
}
case 3:
{
// closed form via the adjugate (transposed cofactor) matrix
assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices");
const double determinant = CalcDeterminant(m);
assert(determinant != 0.0);
const double a = m(0,0);
const double b = m(0,1);
const double c = m(0,2);
const double d = m(1,0);
const double e = m(1,1);
const double f = m(1,2);
const double g = m(2,0);
const double h = m(2,1);
const double k = m(2,2);
DenseMatrix<double> n(3,3);
const double new_a = ((e*k)-(f*h)) / determinant;
const double new_b = -((d*k)-(f*g)) / determinant;
const double new_c = ((d*h)-(e*g)) / determinant;
const double new_d = -((b*k)-(c*h)) / determinant;
const double new_e = ((a*k)-(c*g)) / determinant;
const double new_f = -((a*h)-(b*g)) / determinant;
const double new_g = ((b*f)-(c*e)) / determinant;
const double new_h = -((a*f)-(c*d)) / determinant;
const double new_k = ((a*e)-(b*d)) / determinant;
// note the transposed placement: n(j,i) receives the cofactor of m(i,j)
n(0,0) = new_a;
n(1,0) = new_b;
n(2,0) = new_c;
n(0,1) = new_d;
n(1,1) = new_e;
n(2,1) = new_f;
n(0,2) = new_g;
n(1,2) = new_h;
n(2,2) = new_k;
return n;
}
default:
{
//Use blockwise inversion
//Matrix::Chop returns a std::vector
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]   (the original comment said [4]; the code reads v[3])
const std::vector<DenseMatrix<double> > v = Chop(m);
const DenseMatrix<double>& a = v[0];
assert(a.size1() == a.size2());
const DenseMatrix<double> a_inv = Inverse(a);
const DenseMatrix<double>& b = v[1];
const DenseMatrix<double>& c = v[2];
const DenseMatrix<double>& d = v[3];
// Schur complement of A: term = D - C * A^-1 * B
const DenseMatrix<double> term
= d
- prod(
DenseMatrix<double>(prod(c,a_inv)),
b
);
const DenseMatrix<double> term_inv = Inverse(term);
// standard 2x2 block-inverse formulas in terms of A^-1 and term^-1
const DenseMatrix<double> new_a
= a_inv
+ DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv)),
c)),
a_inv));
const DenseMatrix<double> new_b
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
a_inv,
b)),
term_inv));
const DenseMatrix<double> new_c
=
- DenseMatrix<double>(prod(
DenseMatrix<double>(prod(
term_inv,
c)),
a_inv));
const DenseMatrix<double> new_d = term_inv;
// reassemble the four blocks into the full inverse
std::vector<DenseMatrix<double> > w;
w.push_back(new_a);
w.push_back(new_b);
w.push_back(new_c);
w.push_back(new_d);
const DenseMatrix<double> result = Unchop(w);
return result;
}
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Node-by-node copy of a scalar solution-step variable: on every node,
// destination_variable takes the current value of origin_variable.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        p_node->FastGetSolutionStepValue(destination_variable) = p_node->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Node-by-node copy of a 3-component solution-step variable: on every
// node, destination_variable takes the current value of origin_variable.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 3>>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        noalias(p_node->FastGetSolutionStepValue(destination_variable)) = p_node->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assigns `value` to destination_variable on every node of the model part.
// NOTE(review): the name was likely meant to be SetValueOfAllNodes; it is
// kept unchanged so existing callers keep compiling.
void SetValueOfAllNotes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        p_node->FastGetSolutionStepValue(destination_variable) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assigns the 3-component `value` to destination_variable on every node.
// NOTE(review): the name was likely meant to be SetValueOfAllNodes; it is
// kept unchanged so existing callers keep compiling.
void SetValueOfAllNotes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(node_it.base());
        noalias(p_node->FastGetSolutionStepValue(destination_variable)) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
private:
// true once PerformFirstStepComputations has recorded the pressure field
// (see AssessStationarity); starts false
bool mPressuresFilled;
// first-call flag for gradient recovery; starts true (used outside this chunk)
bool mFirstGradientRecovery;
// first-call flag for Laplacian recovery; starts true (used outside this chunk)
bool mFirstLaplacianRecovery;
// NOTE(review): semantics not visible in this chunk; starts false
bool mSomeCloudsDontWork;
// mode flag; starts false (used outside this chunk)
bool mCalculatingTheGradient;
// mode flag; starts false (used outside this chunk)
bool mCalculatingTheLaplacian;
// first-call flag; starts true (used outside this chunk)
bool mFirstTimeAppending;
// time of the last stationarity measurement (read in AssessStationarity)
double mLastMeasurementTime;
// spatial pressure range recorded at the last measurement
double mLastPressureVariation;
// total domain volume, used for the characteristic length
double mTotalDomainVolume;
// last recorded nodal pressures, in node-iteration order
std::vector<double> mPressures;
// used by the recovery routines (outside this chunk)
std::vector<DenseVector<double> > mFirstRowsOfB;
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the absolute 2-D cross product
// of the two edge vectors emanating from node 0.
inline double CalculateArea(const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2)
{
    const double e1x = x1 - x0, e1y = y1 - y0; // edge 0->1
    const double e2x = x2 - x0, e2y = y2 - y0; // edge 0->2
    return 0.5 * std::abs(e1x * e2y - e2x * e1y);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Signed volume of the tetrahedron with vertices 0..3: det(J) / 6, where J is built
// from the three edge vectors emanating from node 0. The sign encodes orientation.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3)
{
    // edge vectors from node 0
    const double ax = x1 - x0, ay = y1 - y0, az = z1 - z0;
    const double bx = x2 - x0, by = y2 - y0, bz = z2 - z0;
    const double cx = x3 - x0, cy = y3 - y0, cz = z3 - z0;

    // a . (b x c), expanded exactly as in the original to keep identical rounding
    const double det = ax * by * cz - ax * cy * bz +
                       ay * bz * cx - ay * bx * cz +
                       az * bx * cy - az * by * cx;

    return det * 0.1666666666666666666666667; // det / 6
}
//***************************************************************************************************************
//***************************************************************************************************************
// Returns the measure of the element: triangle area when TDim == 2,
// tetrahedron volume otherwise. Aborts with KRATOS_ERROR on a degenerate element.
double CalculateElementalVolume(const Geometry<Node <3> >& geom)
{
    double measure;

    if (TDim == 2){
        measure = CalculateArea(geom[0].X(), geom[0].Y(),
                                geom[1].X(), geom[1].Y(),
                                geom[2].X(), geom[2].Y());
    }
    else {
        measure = CalculateVol(geom[0].X(), geom[0].Y(), geom[0].Z(),
                               geom[1].X(), geom[1].Y(), geom[1].Z(),
                               geom[2].X(), geom[2].Y(), geom[2].Z(),
                               geom[3].X(), geom[3].Y(), geom[3].Z());
    }

    if (std::abs(measure) < std::numeric_limits<double>::epsilon()){
        KRATOS_ERROR << "Element with zero area found with the current geometry "<< geom << std::endl;
    }

    return measure;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Computes the volume-weighted value of the scalar r_var for a tetrahedral element
// and returns the element volume through 'vol'.
// The weights N[i] are sub-tetrahedron volumes built with the element centroid;
// presumably they sum to vol, making the return value the integral of the linearly
// interpolated field over the element — TODO(review) confirm the node orderings below.
double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol)
{
    array_1d<double, 4> N;
    // nodal coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();
    // centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);
    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by "<< geom << std::endl;
    // sub-volume weight per node; the node orderings are deliberate (sign consistency)
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
    // weighted sum of the nodal values at the current time step
    double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var, 0);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Vector counterpart of CalculateScalarIntegralOfLinearInterpolation: returns the
// sub-volume-weighted sum of the nodal values of r_var for a tetrahedral element,
// and the element volume through 'vol'.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;
    // nodal coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();
    // centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);
    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by " << geom << std::endl;
    // sub-volume weight per node; node orderings are deliberate (sign consistency)
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
    // weighted sum of the nodal vector values
    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Same weighting as CalculateVectorIntegralOfLinearInterpolation, but each nodal
// contribution is additionally scaled by the nodal DENSITY and FLUID_FRACTION,
// i.e. the integrand is r_var * density * fluid_fraction.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;
    // nodal coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();
    // centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);
    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by " << geom << std::endl;
    // sub-volume weight per node; node orderings are deliberate (sign consistency)
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);
    // weighted sum of nodal value * density * fluid fraction
    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);
    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
    }
    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// One-time initialisation: caches the domain volume, samples PRESSURE at every
// node into mPressures (in NodesBegin() order), records the sampling time, and
// stores the initial pressure range.
void PerformFirstStepComputations(ModelPart& r_model_part)
{
    mTotalDomainVolume = CalculateDomainVolume(r_model_part);
    mPressures.resize(r_model_part.Nodes().size());
    mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];

    unsigned int slot = 0;
    for (NodeIterator node_it = r_model_part.NodesBegin(); node_it != r_model_part.NodesEnd(); ++node_it) {
        mPressures[slot++] = node_it->FastGetSolutionStepValue(PRESSURE);
    }

    mPressuresFilled = true;
    mLastPressureVariation = GetRangeWithinVector(mPressures);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Strict weak ordering on (id, distance) pairs: primarily by distance, with the
// id breaking ties so the order is deterministic.
struct IsCloser{
    bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
    {
        if (first_pair.second != second_pair.second){
            return first_pair.second < second_pair.second;
        }
        return first_pair.first < second_pair.first;
    }
};
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// n! computed iteratively; Factorial(0) == 1.
// NOTE: the result is accumulated in (and returned as) a 32-bit int, which
// overflows for n > 12 — callers must keep n in [0, 12].
// Cleanup over the original: no separate n == 0 branch, and the accumulator
// counts up instead of copying n and counting down (same values for valid n).
inline int Factorial(const unsigned int n){
    unsigned int result = 1;
    for (unsigned int i = 2; i <= n; ++i){
        result *= i;
    }
    return result;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the longest edge over all elements in the model part.
// BUG FIX: the middle loop ran `k < n_nodes - 1`, so the pair (n_nodes - 2,
// n_nodes - 1) — e.g. edge (2,3) of a tetrahedron or (1,2) of a triangle —
// was never measured. The bound is now `k < n_nodes`, covering all node pairs.
double CalculateTheMaximumEdgeLength(ModelPart& r_model_part)
{
    double max_distance_yet = 0.0;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);

        // every unordered node pair (k - 1, i) with i >= k
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                max_distance_yet = max_distance_yet > distance_2 ? max_distance_yet : distance_2;
            }
        }
    }

    return(std::sqrt(max_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the shortest edge over all elements in the model part.
// BUG FIXES over the original:
//  1. `first_node` was never set to false, so the running minimum was re-seeded
//     on EVERY element — the function effectively returned the shortest edge of
//     the last element only.
//  2. The middle loop ran `k < n_nodes - 1`, skipping the edge between the last
//     two nodes of each element (see CalculateTheMaximumEdgeLength).
double CalculateTheMinumumEdgeLength(ModelPart& r_model_part)
{
    double min_distance_yet = 0.0;
    bool first_element = true;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();

        if (first_element){ // seed with the distance (squared) between two nodes of the first element
            array_1d <double, 3> delta = geom[0] - geom[1];
            min_distance_yet = DEM_INNER_PRODUCT_3(delta, delta);
            first_element = false;
        }

        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);

        // every unordered node pair (k - 1, i) with i >= k
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                min_distance_yet = min_distance_yet < distance_2 ? min_distance_yet : distance_2;
            }
        }
    }

    return(std::sqrt(min_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// The following block of functions is used to calculate explicit matrix inverses and was taken from
// Richel BilderBeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is
// transcribed here with a very minor modification
// Determinant of a 1x1, 2x2 or 3x3 square dense matrix via explicit cofactor
// expansion (based on Richel Bilderbeek's uBLAS examples). Throws for any other size.
double CalcDeterminant(const DenseMatrix<double>& m)
{
    assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices");

    const std::size_t n = m.size1();

    if (n == 1){
        return m(0,0);
    }

    if (n == 2){
        // ad - bc
        return (m(0,0) * m(1,1)) - (m(0,1) * m(1,0));
    }

    if (n == 3){
        // cofactor expansion along the first row; products match the original ordering
        return (m(0,0) * ((m(1,1) * m(2,2)) - (m(1,2) * m(2,1))))
             - (m(0,1) * ((m(2,2) * m(1,0)) - (m(1,2) * m(2,0))))
             + (m(0,2) * ((m(1,0) * m(2,1)) - (m(1,1) * m(2,0))));
    }

    assert(!"Should not get here: unsupported matrix size");
    throw std::runtime_error("Unsupported matrix size");
}
///Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]   (fixed: the original comment said D at [4], but the push order below puts it at index 3)
const std::vector<DenseMatrix<double> > Chop(
    const DenseMatrix<double>& m)
{
    using boost::numeric::ublas::range;
    using boost::numeric::ublas::matrix_range;
    std::vector<matrix<double> > v;
    v.reserve(4);
    // split point: odd dimensions give the extra row/column to the bottom/right blocks
    const int midy = m.size1() / 2;
    const int midx = m.size2() / 2;
    const matrix_range<const matrix<double> > top_left(    m,range(0   ,midy     ),range(0   ,midx     ));
    const matrix_range<const matrix<double> > bottom_left( m,range(midy,m.size1()),range(0   ,midx     ));
    const matrix_range<const matrix<double> > top_right(   m,range(0   ,midy     ),range(midx,m.size2()));
    const matrix_range<const matrix<double> > bottom_right(m,range(midy,m.size1()),range(midx,m.size2()));
    // row-major quadrant order: A, B, C, D
    v.push_back(matrix<double>(top_left));
    v.push_back(matrix<double>(top_right));
    v.push_back(matrix<double>(bottom_left));
    v.push_back(matrix<double>(bottom_right));
    return v;
}
///Unchop merges the 4 std::vector of sub-matrices produced by Chop
const DenseMatrix<double> Unchop(
const std::vector<DenseMatrix<double> >& v)
{
//Chop returns a std::vector of sub-matrices
//[ A at [0] B at [1] ]
//[ C at [2] D at [4] ]
using boost::numeric::ublas::range;
using boost::numeric::ublas::matrix_range;
assert(v.size() == 4);
assert(v[0].size1() == v[1].size1());
assert(v[2].size1() == v[3].size1());
assert(v[0].size2() == v[2].size2());
assert(v[1].size2() == v[3].size2());
DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2());
for (int quadrant=0; quadrant!=4; ++quadrant)
{
const DenseMatrix<double>& w = v[quadrant];
const std::size_t n_rows = v[quadrant].size1();
const std::size_t n_cols = v[quadrant].size2();
const int offset_x = quadrant % 2 ? v[0].size2() : 0;
const int offset_y = quadrant / 2 ? v[0].size1() : 0;
for (std::size_t row=0; row!=n_rows; ++row)
{
for (std::size_t col=0; col!=n_cols; ++col)
{
m(offset_y + row, offset_x + col) = w(row,col);
}
}
}
assert(v[0].size1() + v[2].size1() == m.size1());
assert(v[1].size1() + v[3].size1() == m.size1());
assert(v[0].size2() + v[1].size2() == m.size2());
assert(v[2].size2() + v[3].size2() == m.size2());
return m;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
///@}
///@name Member r_variables
///@{
DenseVector<unsigned int> mElementsPartition;
///@}
///@name Un accessible methods
///@{
// Returns max - min of the given values (0.0 for an empty input).
// BUG FIX: the original loop read mPressures[i] while iterating up to
// vector.size(), ignoring the parameter entirely and risking out-of-bounds
// access whenever the argument is longer than mPressures. It also indexed
// vector[0] without an emptiness check.
double GetRangeWithinVector(const std::vector<double>& vector)
{
    if (vector.empty()){
        return 0.0;
    }

    double min = vector[0];
    double max = vector[0];

    for (unsigned int i = 1; i < vector.size(); ++i){
        min = std::min(min, vector[i]);
        max = std::max(max, vector[i]);
    }

    return (max - min);
}
// Accessor for the per-thread element partition boundaries (mElementsPartition[k]
// is the first element index owned by thread k).
DenseVector<unsigned int>& GetElementPartition()
{
    return mElementsPartition;
}
// First element of thread k's partition within the local mesh.
ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k];
}
// One-past-the-last element of thread k's partition (boundary entry k + 1).
ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1];
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
}; // Class CustomFunctionsCalculator
} // namespace Kratos.
#endif // KRATOS_CREATE_AND_DESTROY defined
|
simd_mthrd.c | #include "types.h"
#include "options.h"
#include <omp.h>
#include "builtins.h"
//#include "veclib.1.0.4/include/veclib_types.h"
//#include "veclib.1.0.4/include/vec256dp.h"
// SIMD helper macros (vec_xld2 / vec_xstd2 are unaligned vector load/store builtins).
// Offsets are doubled because each Real value is complex (2 doubles) — see the
// indexing macros inside simd_diff_cpu_thr.
#define load4(x) vec_xld2(0,x)
#define store4(x,y) vec_xstd2(y,0,x)
#define make4(x) (vector double)(x)
// loadV/storeV index into the psi/npsi fields via baseV/baseST; loadS indexes the
// sigma field via baseS, with 'dir' selecting the x/y/z component plane.
#define loadV(x,y,z) vec_xld2( 2*z + 2*Lkk*y + 2*LkkLjj*x , baseV )
#define loadS(x,y,z,dir) vec_xld2(2*z + 2*(Lkk-2)*y + 2*Lkk_2Ljj_2*x + 2*Lkk_2Ljj_2Lii_1 * dir , baseS)
#define storeV(x,y,z,v) vec_xstd2(v,2*z + 2*Lkk*y + 2*LkkLjj*x,baseST)
typedef vector double vector4;
/*
 * Multithreaded SIMD anisotropic 3-D stencil: accumulates the x-, y- and
 * z-direction flux differences of psi (weighted by sigmaX/Y/Z) into npsi,
 * over 'numtimes' sweeps. Each OpenMP thread owns one 15x15 tile in the
 * (y, z) plane and marches along x.
 *
 * BUG FIX: the z-direction pass summed "zzm + zzm + zym", adding the normal
 * term twice and discarding the computed cross-term zxm. By symmetry with the
 * x-pass (xxm + xym + xzm) and y-pass (yxm + yym + yzm) it must be
 * zxm + zym + zzm.
 */
void simd_diff_cpu_thr(Real* __restrict__ d_psi, Real* __restrict__ d_npsi, Real* __restrict__ d_sigmaX, Real* __restrict__ d_sigmaY, Real* __restrict__ d_sigmaZ,int Lii, int Ljj, int Lkk,int numtimes)
{
    //rough sizing may be needed
    //map z dir to SIMD
    //z is the fastest varying direction
    //2d decomposition
    //15x15 in y z direction
    // field-indexing macros; the leading factor 2 accounts for complex (2-double) entries
#define sigmaX(x,y,z,dir) d_sigmaX[2*( z + (Lkk-2) * ( y + (Ljj-2) * ( x + (Lii-1) * dir ) ) )]
#define sigmaY(x,y,z,dir) d_sigmaY[2*( z + (Lkk-2) * ( y + (Ljj-1) * ( x + (Lii-2) * dir ) ) )]
#define sigmaZ(x,y,z,dir) d_sigmaZ[2*( z + (Lkk-1) * ( y + (Ljj-2) * ( x + (Lii-2) * dir ) ) )]
#define psi(x,y,z) d_psi[2*( z + Lkk * ( y + Ljj * x ) )]
#define npsi(x,y,z) d_npsi[2*( z + Lkk * ( y + Ljj * x ) )]
#define V(x,y,z) psi(x,y,z)
    // tiling currently requires interior dimensions to be multiples of 15
    if ((Ljj-2)%15 != 0 ) printf("Ljj-2 must be multiple of 15. This will be fixed, later\n");
    if ((Lkk-2)%15 != 0 ) printf("Lkk-2 must be multiple of 15. This will be fixed, later\n");
    Lii = Lii/2; // presumably the x extent is halved because two reals are packed per vector lane — TODO confirm
    const int LkkLjj = Lkk * Ljj;
    const int Lkk_2Ljj_2 = (Lkk-2)*(Ljj-2);
    const int Lkk_2Ljj_2Lii_1 = (Lkk-2)*(Ljj-2)*(Lii-1);
    int kblocks = (Lkk-2)/15;
    int t_tile = (Ljj-2)*kblocks/15; // total number of 15x15 tiles (one per thread)
    //printf("totla number of threads : %d\n", omp_get_num_threads());
    (void)t_tile;
#pragma omp parallel
    {
        vector4 xm[15][16]; // x-direction flux carried between consecutive x planes
        vector4 ym[16];     // y-direction flux carried between consecutive y rows
        for (int ii=0; ii<numtimes; ii++)
        {
            // each thread handles exactly one (cjj, ckk) tile
            int tid = omp_get_thread_num();
            {
                int cjj = tid / kblocks;
                int ckk = tid % kblocks;
                cjj = cjj*15 + 1;
                ckk = ckk*15 + 1;
                //todo: when Ljj is not multiple of 15
                {
                    __memset(xm,0,sizeof(xm));
                    for(int x=1;x<Lii-1;x++)
                    {
                        //z direction: all loads are un-aligned load
                        for(int tjj=0;tjj<15;tjj++)
                        {
                            vector4 last = make4(0);
                            double *baseV =(double*)(&V(x,cjj+tjj,ckk));
                            double *baseS = (double*)(&sigmaZ(x,cjj+tjj,ckk,0));
                            double *baseST = (double*)(&npsi(x,cjj+tjj,ckk));
                            for(int tkk=0;tkk<16;tkk++)
                            {
                                int z = tkk;
                                vector4 v00 = loadV(0,0,z-1) ;
                                vector4 v0p = loadV(0,0+1,z-1) ;
                                vector4 v0m = loadV(0,0-1,z-1) ;
                                vector4 vp0 = loadV(0+1,0,z-1) ;
                                vector4 vm0 = loadV(0-1,0,z-1) ;
                                vector4 w00 = loadV(0,0,z) ;
                                vector4 w0p = loadV(0,0+1,z) ;
                                vector4 w0m = loadV(0,0-1,z) ;
                                vector4 wp0 = loadV(0+1,0,z) ;
                                vector4 wm0 = loadV(0-1,0,z) ;
                                vector4 sX = loadS( 0,0,z-1,0) ;
                                vector4 sY = loadS( 0,0,z-1,1) ;
                                vector4 sZ = loadS( 0,0,z-1,2) ;
                                vector4 zzm = sZ * (w00 - v00);
                                vector4 zxm = sX * ( vp0 - vm0 + wp0 - wm0 ) * make4(0.5);
                                vector4 zym = sY * ( v0p - v0m + w0p - w0m ) * make4(0.5);
                                // BUG FIX: was "zzm + zzm + zym" (zxm computed but unused)
                                vector4 tmp = zxm + zym + zzm;
                                storeV(0,0,z, tmp + last);
                                last = tmp;
                            }
                        }
                        //x direction: all loads are un-aligned load
                        for(int tjj=0;tjj<15;tjj++)
                        {
                            double *baseV =(double*)(&V(x,cjj+tjj,ckk));
                            double *baseS = (double*)(&sigmaX(x,cjj+tjj,ckk,0));
                            double *baseST = (double*)(&npsi(x,cjj+tjj,ckk));
                            for(int tkk=0;tkk<16;tkk++)
                            {
                                int z = tkk;
                                vector4 v0m = loadV( 0,0,z-1) ;
                                vector4 v0p = loadV( 0,0,z+1) ;
                                vector4 v00 = loadV( 0,0,z) ;
                                vector4 vp0 = loadV( 0,0+1,z) ;
                                vector4 vm0 = loadV( 0,0-1,z) ;
                                vector4 w0m = loadV( 0+1,0,z-1) ;
                                vector4 w0p = loadV( 0+1,0,z+1) ;
                                vector4 w00 = loadV( 0+1,0,z) ;
                                vector4 wp0 = loadV( 0+1,0+1,z) ;
                                vector4 wm0 = loadV( 0+1,0-1,z) ;
                                vector4 sX = loadS( 0,0,z,0) ;
                                vector4 sY = loadS( 0,0,z,1) ;
                                vector4 sZ = loadS( 0,0,z,2) ;
                                vector4 xxm = sX * (w00 - v00);
                                vector4 xym = sY * (vp0 - vm0 + wp0 - wm0 ) * make4(0.5);
                                vector4 xzm = sZ * (v0p - v0m + w0p - w0m ) * make4(0.5);
                                vector4 tmp = xxm + xym + xzm;
                                storeV(0,0,z, xm[tjj][tkk] + tmp);
                                xm[tjj][tkk] = tmp;
                            }
                        }
                        //y direction:
                        __memset(ym,0,sizeof(ym));
                        for(int tjj=0;tjj<15;tjj++)
                        {
                            double *baseV =(double*)(&V(x,cjj+tjj,ckk));
                            double *baseS = (double*)(&sigmaY(x,cjj+tjj,ckk,0));
                            double *baseST = (double*)(&npsi(x,cjj+tjj,ckk));
                            for(int tkk=0;tkk<16;tkk++)
                            {
                                int z = tkk;
                                vector4 v0m = loadV( 0,0,z-1) ;
                                vector4 v0p = loadV( 0,0,z+1) ;
                                vector4 v00 = loadV( 0,0,z) ;
                                vector4 vp0 = loadV( 0+1,0,z) ;
                                vector4 vm0 = loadV( 0-1,0,z) ;
                                vector4 w0m = loadV( 0,0+1,z-1) ;
                                vector4 w0p = loadV( 0,0+1,z+1) ;
                                vector4 w00 = loadV( 0,0+1,z) ;
                                vector4 wp0 = loadV( 0+1,0+1,z) ;
                                vector4 wm0 = loadV( 0-1,0+1,z) ;
                                vector4 sX = loadS (0,0,z,0) ;
                                vector4 sY = loadS (0,0,z,1) ;
                                vector4 sZ = loadS (0,0,z,2) ;
                                vector4 yxm = sX * (vp0 - vm0 + wp0 - wm0 ) * make4(0.5);
                                vector4 yym = sY * (w00 - v00);
                                vector4 yzm = sZ * (v0p - v0m + w0p - w0m ) * make4(0.5);
                                vector4 tmp = yxm + yym + yzm;
                                storeV(0,0,z, ym[tkk] + tmp);
                                ym[tkk] = tmp;
                            }
                        }
                    } //x loop
                } //cjj,ckk
            } //end of block
        }
    }
}
|
openMP.c | #include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include "omp.h"
// Set the matrix sixe, number of threads and chunk size
const int MATRIX_SIZE = 8192;
const int NUMBER_OF_THREADS = 128;
const int CHUNK_SIZE = 2;
//iterate through the array and print the matrix in matrix readable fashion
/* Prints an n x n integer matrix to stdout, tab-separated, one row per line. */
void printSquareMatrixInt(int n, int** matrix) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            printf("%d \t", matrix[row][col]);
        }
        printf("\n");
    }
}
/* Transposes the global MATRIX_SIZE x MATRIX_SIZE matrix in place by swapping
 * each strict upper-triangle element with its mirror below the diagonal. */
void naiveSolutionInt(int** matrix) {
    for (int row = 0; row < MATRIX_SIZE - 1; row++) {
        for (int col = row + 1; col < MATRIX_SIZE; col++) {
            int swap = matrix[row][col];
            matrix[row][col] = matrix[col][row];
            matrix[col][row] = swap;
        }
    }
}
/* OpenMP in-place transpose of the global MATRIX_SIZE x MATRIX_SIZE matrix:
 * rows of the strict upper triangle are handed out dynamically in chunks of
 * CHUNK_SIZE; distinct rows touch disjoint swap pairs, so no synchronization
 * is needed and the trailing barrier can be skipped (nowait). */
void openMPsolution(int** matrix) {
    int row, col, swap;
#pragma omp parallel shared(matrix) private(swap, row, col)
    {
#pragma omp for schedule(dynamic, CHUNK_SIZE) nowait
        for (row = 0; row < MATRIX_SIZE - 1; row++) {
            for (col = row + 1; col < MATRIX_SIZE; col++) {
                swap = matrix[row][col];
                matrix[row][col] = matrix[col][row];
                matrix[col][row] = swap;
            }
        }
    }
}
/*
 * Benchmarks the serial vs. OpenMP in-place transpose on a
 * MATRIX_SIZE x MATRIX_SIZE matrix filled with 0, 1, 2, ...
 * Fixes over the original: every malloc result is checked (with cleanup on
 * partial failure), the matrix is freed before exit, and main returns
 * an explicit status.
 */
int main() {
    omp_set_num_threads(NUMBER_OF_THREADS);
    // Create dynamic array and populate it
    int ** matrix = malloc(MATRIX_SIZE * sizeof *matrix);
    if (matrix == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    for(int i = 0; i < MATRIX_SIZE; i++) {
        matrix[i] = malloc(MATRIX_SIZE * sizeof *matrix[i]);
        if (matrix[i] == NULL) {
            fprintf(stderr, "allocation failed\n");
            for (int k = 0; k < i; k++) free(matrix[k]);
            free(matrix);
            return 1;
        }
    }
    int count = 0;
    for(int i = 0; i < MATRIX_SIZE; i++) {
        for(int j = 0; j < MATRIX_SIZE; j++) {
            matrix[i][j] = count;
            count++;
        }
    }
    printf("Size %c %c: %d\n", ' ',' ', MATRIX_SIZE);
    printf("Threads %c: %d\n", ' ', NUMBER_OF_THREADS );
    double dtime;
    // time the serial transpose
    dtime = omp_get_wtime();
    naiveSolutionInt(matrix);
    dtime = omp_get_wtime() - dtime;
    printf("Naive %c %c: %f\n", ' ',' ', dtime);
    // time the parallel transpose (transposes back to the original layout)
    dtime = omp_get_wtime();
    openMPsolution(matrix);
    dtime = omp_get_wtime() - dtime;
    printf("Parallel : %f\n", dtime);
    // release the matrix
    for(int i = 0; i < MATRIX_SIZE; i++) {
        free(matrix[i]);
    }
    free(matrix);
    return 0;
}
|
convolution_3x3_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
void conv3x3s1_winograd42_pack8to4_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
void conv3x3s1_winograd42_pack8to4_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
void conv3x3s1_winograd42_pack8to4_int8_sse_avx2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
void conv3x3s1_winograd42_pack8to4_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#endif
// Transforms 3x3 int8 convolution kernels into the Winograd F(4,3) domain
// (G * g * G^T, yielding 6x6 tiles) and repacks them for the pack8-to-pack4
// int8 kernels. Dispatches to an ISA-specific build when available at runtime.
// Assumes inch is a multiple of 8 and outch a multiple of 4 (see the packed
// layout created below).
static void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    // runtime dispatch: prefer the AVX512-VNNI build when the CPU supports it
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx512vnni(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avxvnni(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx2(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_xop(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif
#endif
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
    // G matrix for F(4x4, 3x3), scaled to integers (hence the int16 output)
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;
            // h = G * g  (6x3 intermediate)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // U = h * G^T  (6x6 transformed tile)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8.create(inch / 8, 36, outch / 4, (size_t)2u * 32, 32);
    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        // NOTE(review): this local deliberately shadows the outer kernel_tm
        Mat kernel_tm = kernel_tm_pack8.channel(q / 4);
        for (int k = 0; k < 36; k++)
        {
            short* g00 = kernel_tm.row<short>(k);
            for (int p = 0; p + 7 < inch; p += 8)
            {
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
                // VNNI/XOP layout: interleave adjacent input-channel pairs per output channel
                for (int i = 0; i < 4; i++)
                {
                    const short* k00 = k0.row<const short>(p + i * 2);
                    const short* k10 = k1.row<const short>(p + i * 2);
                    const short* k20 = k2.row<const short>(p + i * 2);
                    const short* k30 = k3.row<const short>(p + i * 2);
                    const short* k01 = k0.row<const short>(p + i * 2 + 1);
                    const short* k11 = k1.row<const short>(p + i * 2 + 1);
                    const short* k21 = k2.row<const short>(p + i * 2 + 1);
                    const short* k31 = k3.row<const short>(p + i * 2 + 1);
                    g00[0] = k00[k];
                    g00[1] = k01[k];
                    g00[2] = k10[k];
                    g00[3] = k11[k];
                    g00[4] = k20[k];
                    g00[5] = k21[k];
                    g00[6] = k30[k];
                    g00[7] = k31[k];
                    g00 += 8;
                }
#else
                // generic layout: 4 output channels per input channel, 8 input channels per group
                for (int i = 0; i < 8; i++)
                {
                    const short* k00 = k0.row<const short>(p + i);
                    const short* k10 = k1.row<const short>(p + i);
                    const short* k20 = k2.row<const short>(p + i);
                    const short* k30 = k3.row<const short>(p + i);
                    g00[0] = k00[k];
                    g00[1] = k10[k];
                    g00[2] = k20[k];
                    g00[3] = k30[k];
                    g00 += 4;
                }
#endif
            }
        }
    }
}
// Winograd F(4x4, 3x3) int8 convolution, stride 1: pack8 (int8/int16) input -> pack4 (int32) output.
//
// Pipeline:
//   1. pad the input so the output is a multiple of 4 in each dimension (tiles of 4 need 6 input rows/cols),
//   2. input transform: B^T d B on each 6x6 tile, producing 16-bit coefficients,
//   3. permute transformed tiles into a GEMM-friendly layout (groups of 4 / 2 / 1 tiles),
//   4. dot: multiply with the pre-transformed kernels in kernel_tm, accumulating into int32,
//   5. output transform: A^T m A, then divide by 576 to undo the integer scaling of the
//      transform matrices (presumably 4*... transform gain; see the /576 note below — TODO confirm),
//   6. crop the bordered result back to the requested output size.
//
// bottom_blob: int8 input, elempack == 8.
// top_blob:    int32 output, elempack == 4 (already created by the caller with the final shape).
// kernel_tm:   kernels already in Winograd-transformed, interleaved int16 layout.
static void conv3x3s1_winograd42_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    // Runtime dispatch to the best available ISA-specific build of this kernel.
    // Only compiled into the plain-SSE translation unit; the dispatched copies are
    // compiled with the corresponding -m flags and skip this block.
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        conv3x3s1_winograd42_pack8to4_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        conv3x3s1_winograd42_pack8to4_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        conv3x3s1_winograd42_pack8to4_int8_sse_avx2(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        conv3x3s1_winograd42_pack8to4_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif
#endif
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // pad to 4n+2: each 4x4 output tile consumes a 6x6 input window, so round the
    // output up to a multiple of 4 and border-pad the input with zeros accordingly
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;
        // 36 transform coefficients per tile, stored as int16 (2u * elempack bytes per packed element)
        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };
        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);
            // scratch holding one 6x6 tile after the column pass (B^T d), 8 lanes per element
            short tmp[6][6][8];
            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
                    // column pass: transform each of the 6 rows of the tile
                    for (int m = 0; m < 6; m++)
                    {
                        // TODO use _mm_cvtepi8_epi16 on sse4.1
                        __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0);
                        __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16));
                        __m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32));
                        // SSE2 sign-extension: cmpgt against zero produces the sign mask,
                        // unpack interleaves it to widen int8 -> int16
                        __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01);
                        __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03);
                        __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05);
                        __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001);
                        __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001);
                        __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203);
                        __m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203);
                        __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405);
                        __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405);
                        // B^T row combinations; *4 and *2 are done with shifts, *5 with mullo
                        __m128i _v5 = _mm_set1_epi16(5);
                        __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5));
                        __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2));
                        __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2));
                        __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
                        __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
                        __m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5));
                        // stored transposed: tmp[col][row] so the second pass reads rows contiguously
                        _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m);
                        _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m);
                        _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m);
                        _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m);
                        _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m);
                        _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m);
                        r0 += w * 8;
                    }
                    // destination: 6 of the 36 coefficient planes for this tile,
                    // planes are tiles*8 shorts apart
                    short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
                    short* r0_tm_1 = r0_tm_0 + tiles * 8;
                    short* r0_tm_2 = r0_tm_0 + tiles * 16;
                    short* r0_tm_3 = r0_tm_0 + tiles * 24;
                    short* r0_tm_4 = r0_tm_0 + tiles * 32;
                    short* r0_tm_5 = r0_tm_0 + tiles * 40;
                    // row pass: apply B^T again to the transposed intermediate
                    for (int m = 0; m < 6; m++)
                    {
                        __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]);
                        __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]);
                        __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]);
                        __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]);
                        __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]);
                        __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]);
                        __m128i _v5 = _mm_set1_epi16(5);
                        __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5));
                        __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2));
                        __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2));
                        __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
                        __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
                        __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5));
                        _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0);
                        _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1);
                        _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2);
                        _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3);
                        _mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4);
                        _mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5);
                        // advance 6 planes (6 * tiles * 8 shorts) to the next output row group
                        r0_tm_0 += tiles * 48;
                        r0_tm_1 += tiles * 48;
                        r0_tm_2 += tiles * 48;
                        r0_tm_3 += tiles * 48;
                        r0_tm_4 += tiles * 48;
                        r0_tm_5 += tiles * 48;
                    }
                }
            }
        }
    }
    // input no longer needed; release the bordered copy early
    bottom_blob_bordered = Mat();
    // END transform input
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = h_tm / 6 * w_tm / 6;
        // permute: regroup tiles so the GEMM inner loop reads contiguous memory.
        // Tiles are batched 4-at-a-time (AVX2) or 2-at-a-time, with smaller
        // remainders packed after — row index math below mirrors this layout.
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
#if __AVX2__
        if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
        if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);
            // tile
            int i = 0;
#if __AVX2__
            // copy 4 tiles x inch channels per row
            for (; i + 3 < tiles; i += 4)
            {
                short* tmpptr = tm2.row<short>(i / 4);
                const short* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0);
                    __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
                    _mm256_storeu_si256((__m256i*)tmpptr, _r0);
                    _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 32;
                }
            }
#endif
            // copy 2 tiles x inch channels per row
            for (; i + 1 < tiles; i += 2)
            {
#if __AVX2__
                short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2);
#else
                short* tmpptr = tm2.row<short>(i / 2);
#endif
                const short* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    __m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
                    __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
                    _mm_storeu_si128((__m128i*)tmpptr, _r0);
                    _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 16;
                }
            }
            // remaining single tile
            for (; i < tiles; i++)
            {
#if __AVX2__
                short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2);
#else
                short* tmpptr = tm2.row<short>(i / 2 + i % 2);
#endif
                const short* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    __m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
                    _mm_storeu_si128((__m128i*)tmpptr, _r0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 8;
                }
            }
        }
        bottom_blob_tm = Mat();
        // permute end
        // int32 accumulators, pack4 output (4u * 4 bytes per packed element)
        top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator);
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            int* output0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);
            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);
                int i = 0;
#if __AVX2__
                // 4 tiles at once: each iteration multiplies 8 input channels
                // (int16) by the 8x4 kernel slice and accumulates into int32.
                for (; i + 3 < tiles; i += 4)
                {
                    const short* r0 = bb2.row<const short>(i / 4);
                    const short* k0 = kernel0_tm.row<const short>(r);
                    int nn = inch; // inch always > 0
                    __m256i _sum0_1 = _mm256_setzero_si256();
                    __m256i _sum2_3 = _mm256_setzero_si256();
                    __m256i _sum4_5 = _mm256_setzero_si256();
                    __m256i _sum6_7 = _mm256_setzero_si256();
                    for (int j = 0; j < nn; j++)
                    {
                        // 0 1 2 3 4 5 6 7 8 9 a b c d e f
                        __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0);
                        __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
                        __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
                        // VNNI: dpwssd does pairwise int16 multiply + int32 accumulate in one op
                        __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
                        __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
                        __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
                        __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123);
                        _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab);
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567);
                        _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef);
#else
                        // no VNNI: widen int16 products via mullo/mulhi pairs, then
                        // reassemble 32-bit products with unpacklo/hi and add
                        // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b
                        // 4 4 5 5 6 6 7 7 c c d d e e f f
                        __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0);
                        __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0);
                        __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123);
                        __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123);
                        __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab);
                        __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab);
                        __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567);
                        __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567);
                        __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef);
                        __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef);
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
                        // tiles 2..3 use the same weights with the second 16-short block
                        __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
                        __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
                        __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
                        __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
                        __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
                        _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123);
                        _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab);
                        _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567);
                        _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef);
#else
                        __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1);
                        __m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1);
                        __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123);
                        __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123);
                        __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab);
                        __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab);
                        __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567);
                        __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567);
                        __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef);
                        __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef);
                        _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05));
                        _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15));
                        _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07));
                        _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17));
                        _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05));
                        _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15));
                        _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07));
                        _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17));
#endif
                        r0 += 32;
                        k0 += 32;
                    }
                    // horizontal reduction: pair the 128-bit halves and add to get
                    // one int32x4 result per tile, then store 4 tiles (16 ints)
                    __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
                    __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
                    _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
                    __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0));
                    __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1));
                    _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7);
                    _mm256_storeu_si256((__m256i*)output0_tm, _sum0_2);
                    _mm256_storeu_si256((__m256i*)(output0_tm + 8), _sum4_6);
                    output0_tm += 16;
                }
#endif
                // 2 tiles at once
                for (; i + 1 < tiles; i += 2)
                {
#if __AVX2__
                    const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#else
                    const short* r0 = bb2.row<const short>(i / 2);
#endif
                    const short* k0 = kernel0_tm.row<const short>(r);
                    int nn = inch; // inch always > 0
#if __AVX2__
                    __m256i _sum0_1 = _mm256_setzero_si256();
                    __m256i _sum2_3 = _mm256_setzero_si256();
#else
                    __m128i _sum0 = _mm_setzero_si128();
                    __m128i _sum1 = _mm_setzero_si128();
                    __m128i _sum2 = _mm_setzero_si128();
                    __m128i _sum3 = _mm_setzero_si128();
#endif
                    for (int j = 0; j < nn; j++)
                    {
#if __AVX2__
                        // 0 1 2 3 4 5 6 7 8 9 a b c d e f
                        __m256i _val = _mm256_loadu_si256((const __m256i*)r0);
                        __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
                        __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
                        __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
                        __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
                        __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
                        __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
                        _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab);
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
                        _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef);
#else
                        __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val);
                        __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val);
                        __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
                        __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
                        __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
                        __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab);
                        __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab);
                        __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
                        __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
                        __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef);
                        __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef);
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
                        _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
#else
                        // 0 1 2 3 4 5 6 7
                        __m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
                        __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
                        __m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
                        __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
                        __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
                        __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
#if __XOP__
                        // XOP maddd: int16 pairwise multiply-add with int32 accumulator
                        __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0));
                        __m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1));
                        __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2));
                        __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3));
                        __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0));
                        __m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1));
                        __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2));
                        __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3));
                        _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0);
                        _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1);
                        _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2);
                        _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3);
                        _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0);
                        _sum1 = _mm_maddd_epi16(_val0_67, _w3, _sum1);
                        _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2);
                        _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3);
#else
                        // plain SSE2: widen via mullo/mulhi, reassemble, add
                        // 0 0 1 1 2 2 3 3
                        // 4 4 5 5 6 6 7 7
                        __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0);
                        __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0);
                        __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1);
                        __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1);
                        __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123);
                        __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123);
                        __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567);
                        __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567);
                        __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123);
                        __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123);
                        __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567);
                        __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567);
                        __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01);
                        __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01);
                        __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01);
                        __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01);
                        __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23);
                        __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23);
                        __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23);
                        __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23);
                        __m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45);
                        __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45);
                        __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45);
                        __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45);
                        __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67);
                        __m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67);
                        __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67);
                        __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67);
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
                        _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10));
                        _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01));
                        _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11));
                        _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02));
                        _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12));
                        _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03));
                        _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13));
                        _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
#endif
                        r0 += 16;
                        k0 += 32;
                    }
#if __AVX2__
                    __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
                    __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
                    _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
                    _mm256_storeu_si256((__m256i*)output0_tm, _sum0_2);
#else
                    _sum0 = _mm_add_epi32(_sum0, _sum1);
                    _sum2 = _mm_add_epi32(_sum2, _sum3);
                    _mm_storeu_si128((__m128i*)output0_tm, _sum0);
                    _mm_storeu_si128((__m128i*)(output0_tm + 4), _sum2);
#endif
                    output0_tm += 8;
                }
                // remaining single tile
                for (; i < tiles; i++)
                {
#if __AVX2__
                    const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#else
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
#endif
                    const short* k0 = kernel0_tm.row<const short>(r);
                    int nn = inch; // inch always > 0
#if __AVX2__
                    __m256i _sum0_1 = _mm256_setzero_si256();
#else
                    __m128i _sum0 = _mm_setzero_si128();
                    __m128i _sum1 = _mm_setzero_si128();
#endif
                    for (int j = 0; j < nn; j++)
                    {
                        // 0 1 2 3 4 5 6 7
                        __m128i _val = _mm_loadu_si128((const __m128i*)r0);
#if __AVX2__
                        __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
                        __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
                        // 0 1 0 1 x x x x
                        // 0 1 0 1 0 1 0 1
                        __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0));
                        __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1));
                        __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2));
                        __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3));
                        __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1);
                        __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1);
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
                        _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
#else
                        // 0 0 1 1 2 2 3 3
                        // 4 4 5 5 6 6 7 7
                        __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val));
                        __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val));
                        _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
                        __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
                        __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
                        __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
                        __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
                        _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
#endif
#else
                        __m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
                        __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
                        __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
                        __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
#if __XOP__
                        __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0));
                        __m128i _val23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1));
                        __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2));
                        __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3));
                        _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0);
                        _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1);
                        _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0);
                        _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1);
#else
                        // 0 0 1 1 2 2 3 3
                        // 4 4 5 5 6 6 7 7
                        __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val);
                        __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val);
                        __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123);
                        __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123);
                        __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567);
                        __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567);
                        __m128i _sl0 = _mm_mullo_epi16(_w0, _val01);
                        __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01);
                        __m128i _sl1 = _mm_mullo_epi16(_w1, _val23);
                        __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23);
                        __m128i _sl2 = _mm_mullo_epi16(_w2, _val45);
                        __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45);
                        __m128i _sl3 = _mm_mullo_epi16(_w3, _val67);
                        __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67);
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2));
                        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3));
                        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
#endif
                        r0 += 8;
                        k0 += 32;
                    }
#if __AVX2__
                    __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
                    __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
#endif
                    _sum0 = _mm_add_epi32(_sum0, _sum1);
                    _mm_storeu_si128((__m128i*)output0_tm, _sum0);
                    output0_tm += 4;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot
    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // output already tile-aligned: write straight into top_blob
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };
        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);
            // scratch for one tile after the row pass (A^T m), int32, 4 lanes
            int tmp[4][6][4];
            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
                    const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4;
                    const int* output0_tm_1 = output0_tm_0 + tiles * 4;
                    const int* output0_tm_2 = output0_tm_0 + tiles * 8;
                    const int* output0_tm_3 = output0_tm_0 + tiles * 12;
                    const int* output0_tm_4 = output0_tm_0 + tiles * 16;
                    const int* output0_tm_5 = output0_tm_0 + tiles * 20;
                    int* output0 = out0.row<int>(i * 4) + (j * 4) * 4;
                    // TODO sse optimize
                    // row pass for the first 5 coefficient columns
                    for (int m = 0; m < 5; m++)
                    {
                        __m128i _out0tm0 = _mm_loadu_si128((const __m128i*)output0_tm_0);
                        __m128i _out0tm1 = _mm_loadu_si128((const __m128i*)output0_tm_1);
                        __m128i _out0tm2 = _mm_loadu_si128((const __m128i*)output0_tm_2);
                        __m128i _out0tm3 = _mm_loadu_si128((const __m128i*)output0_tm_3);
                        __m128i _out0tm4 = _mm_loadu_si128((const __m128i*)output0_tm_4);
                        __m128i _out0tm5 = _mm_loadu_si128((const __m128i*)output0_tm_5);
                        __m128i _tmp02a = _mm_add_epi32(_out0tm1, _out0tm2);
                        __m128i _tmp13a = _mm_sub_epi32(_out0tm1, _out0tm2);
                        __m128i _tmp02b = _mm_add_epi32(_out0tm3, _out0tm4);
                        __m128i _tmp13b = _mm_sub_epi32(_out0tm3, _out0tm4);
                        __m128i _tmp0m = _mm_add_epi32(_mm_add_epi32(_out0tm0, _tmp02a), _tmp02b);
                        __m128i _tmp1m = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1));
                        __m128i _tmp2m = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2));
                        __m128i _tmp3m = _mm_add_epi32(_mm_add_epi32(_tmp13a, _mm_slli_epi32(_out0tm5, 2)), _mm_slli_epi32(_tmp13b, 3));
                        _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m);
                        _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m);
                        _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m);
                        _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m);
                        output0_tm_0 += tiles * 24;
                        output0_tm_1 += tiles * 24;
                        output0_tm_2 += tiles * 24;
                        output0_tm_3 += tiles * 24;
                        output0_tm_4 += tiles * 24;
                        output0_tm_5 += tiles * 24;
                    }
                    // last coefficient column: same transform, but pre-scaled by 4 (<< 2)
                    // NOTE(review): the extra << 2 appears to equalize the transform gain of
                    // the last column against the other five before the common /576 — TODO confirm
                    for (int m = 5; m < 6; m++)
                    {
                        __m128i _out0tm0 = _mm_loadu_si128((const __m128i*)output0_tm_0);
                        __m128i _out0tm1 = _mm_loadu_si128((const __m128i*)output0_tm_1);
                        __m128i _out0tm2 = _mm_loadu_si128((const __m128i*)output0_tm_2);
                        __m128i _out0tm3 = _mm_loadu_si128((const __m128i*)output0_tm_3);
                        __m128i _out0tm4 = _mm_loadu_si128((const __m128i*)output0_tm_4);
                        __m128i _out0tm5 = _mm_loadu_si128((const __m128i*)output0_tm_5);
                        __m128i _tmp02a = _mm_add_epi32(_out0tm1, _out0tm2);
                        __m128i _tmp13a = _mm_sub_epi32(_out0tm1, _out0tm2);
                        __m128i _tmp02b = _mm_add_epi32(_out0tm3, _out0tm4);
                        __m128i _tmp13b = _mm_sub_epi32(_out0tm3, _out0tm4);
                        __m128i _tmp0m = _mm_add_epi32(_mm_add_epi32(_out0tm0, _tmp02a), _tmp02b);
                        __m128i _tmp1m = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1));
                        __m128i _tmp2m = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2));
                        __m128i _tmp3m = _mm_add_epi32(_mm_add_epi32(_tmp13a, _mm_slli_epi32(_out0tm5, 2)), _mm_slli_epi32(_tmp13b, 3));
                        _tmp0m = _mm_slli_epi32(_tmp0m, 2);
                        _tmp1m = _mm_slli_epi32(_tmp1m, 2);
                        _tmp2m = _mm_slli_epi32(_tmp2m, 2);
                        _tmp3m = _mm_slli_epi32(_tmp3m, 2);
                        _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m);
                        _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m);
                        _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m);
                        _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m);
                        output0_tm_0 += tiles * 24;
                        output0_tm_1 += tiles * 24;
                        output0_tm_2 += tiles * 24;
                        output0_tm_3 += tiles * 24;
                        output0_tm_4 += tiles * 24;
                        output0_tm_5 += tiles * 24;
                    }
                    // column pass: produce the 4x4 output tile and undo the transform scale
                    for (int m = 0; m < 4; m++)
                    {
                        __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]);
                        __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]);
                        __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]);
                        __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]);
                        __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]);
                        __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]);
                        __m128i _tmp02a = _mm_add_epi32(_tmp01, _tmp02);
                        __m128i _tmp13a = _mm_sub_epi32(_tmp01, _tmp02);
                        __m128i _tmp02b = _mm_add_epi32(_tmp03, _tmp04);
                        __m128i _tmp13b = _mm_sub_epi32(_tmp03, _tmp04);
                        __m128i _out00 = _mm_add_epi32(_mm_add_epi32(_tmp00, _tmp02a), _tmp02b);
                        __m128i _out01 = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1));
                        __m128i _out02 = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2));
                        __m128i _out03 = _mm_add_epi32(_mm_add_epi32(_tmp05, _tmp13a), _mm_slli_epi32(_tmp13b, 3));
                        // TODO use integer trick for division by 576
                        // divide by 576 via float multiply + truncate (round toward zero)
                        __m128 _v576 = _mm_set1_ps(1.0 / 576);
                        _out00 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out00), _v576));
                        _out01 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out01), _v576));
                        _out02 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out02), _v576));
                        _out03 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out03), _v576));
                        _mm_storeu_si128((__m128i*)output0, _out00);
                        _mm_storeu_si128((__m128i*)(output0 + 4), _out01);
                        _mm_storeu_si128((__m128i*)(output0 + 8), _out02);
                        _mm_storeu_si128((__m128i*)(output0 + 12), _out03);
                        output0 += outw * 4;
                    }
                }
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
lbfgsbsolver.h | // CppNumericalSolver
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../boundedproblem.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H
#define LBFGSBSOLVER_H
namespace cppoptlib {
// LbfgsbSolver: L-BFGS-B, a limited-memory quasi-Newton method for
// minimizing a function subject to simple box constraints l <= x <= u.
// The "PAGE"/"STEP" comments below refer to the algorithm description
// in Byrd, Lu, Nocedal & Zhu, "A Limited Memory Algorithm for Bound
// Constrained Optimization" -- presumably the paper this code follows;
// confirm against the upstream CppNumericalSolvers documentation.
template<typename TProblem>
class LbfgsbSolver : public ISolver<TProblem, 1> {
public:
using Superclass = ISolver<TProblem, 1>;
using typename Superclass::Scalar;
using typename Superclass::TVector;
using MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using VariableTVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
protected:
// workspace matrices
// W = [Y, theta*S] and M together form the compact representation of
// the limited-memory Hessian approximation B = theta*I - W*M*W^T
// (see the rebuild at the end of minimize()).
MatrixType W, M;
// scaling of the identity part of B; recomputed on every accepted update
Scalar theta;
// number of variables of the problem; set at the start of minimize()
int DIM;
// number of correction pairs (s, y) kept in history (see setHistorySize)
int m_historySize = 5;
/**
* @brief sort pairs (k,v) according v ascending
* @details Returns the `first` fields of v reordered so that their
* `second` fields are ascending.  NOTE: the comparator indexes v by
* the values stored in idx, which is only correct because the caller
* (getGeneralizedCauchyPoint) pushes pairs with first == position.
*
* @param v vector of (index, value) pairs; requires v[i].first == i
* @return the indices of v sorted by ascending value
*/
std::vector<int> sort_indexes(const std::vector< std::pair<int, Scalar> > &v) {
std::vector<int> idx(v.size());
for (size_t i = 0; i != idx.size(); ++i)
idx[i] = v[i].first;
sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) {
return v[i1].second < v[i2].second;
});
return idx;
}
/**
* @brief Algorithm CP: Computation of the generalized Cauchy point
* @details PAGE 8
*
* @param problem the bound-constrained problem (supplies bounds)
* @param x current iterate
* @param g gradient at x
* @param x_cauchy output: the generalized Cauchy point
* @param c output: accumulated W^T * (x_cauchy - x), reused by
* SubspaceMinimization
*/
void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, const TVector &g, TVector &x_cauchy, VariableTVector &c) {
const int DIM = x.rows();
// Given x,l,u,g, and B = \theta I-WMW
// {all t_i} = { (idx,value), ... }
// TODO: use "std::set" ?
std::vector<std::pair<int, Scalar> > SetOfT;
// the feasible set is implicitly given by "SetOfT - {t_i==0}"
TVector d = -g;
// n operations
// t_j is the breakpoint at which coordinate j, moving along -g,
// hits its (upper or lower) bound; g(j)==0 means "never".
for (int j = 0; j < DIM; j++) {
if (g(j) == 0) {
SetOfT.push_back(std::make_pair(j, std::numeric_limits<Scalar>::max()));
} else {
Scalar tmp = 0;
if (g(j) < 0) {
tmp = (x(j) - problem.upperBound()(j)) / g(j);
} else {
tmp = (x(j) - problem.lowerBound()(j)) / g(j);
}
SetOfT.push_back(std::make_pair(j, tmp));
if (tmp == 0) d(j) = 0;
}
}
// sortedindices [1,0,2] means the minimal element is on the 1-st entry
std::vector<int> sortedIndices = sort_indexes(SetOfT);
x_cauchy = x;
// Initialize
// p := W^Scalar*p
VariableTVector p = (W.transpose() * d); // (2mn operations)
// c := 0
c = VariableTVector::Zero(W.cols());
// f' := g^Scalar*d = -d^Td
Scalar f_prime = -d.dot(d); // (n operations)
// f'' := \theta*d^Scalar*d-d^Scalar*W*M*W^Scalar*d = -\theta*f' - p^Scalar*M*p
Scalar f_doubleprime = (Scalar)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
// clamp the curvature away from zero so dt_min below stays finite
f_doubleprime = std::max(std::numeric_limits<Scalar>::epsilon(), f_doubleprime);
Scalar f_dp_orig = f_doubleprime;
// \delta t_min := -f'/f''
Scalar dt_min = -f_prime / f_doubleprime;
// t_old := 0
Scalar t_old = 0;
// b := argmin {t_i , t_i >0}
// skip leading non-positive breakpoints; i ends at the first t > 0
int i = 0;
for (int j = 0; j < DIM; j++) {
i = j;
if (SetOfT[sortedIndices[j]].second > 0)
break;
}
int b = sortedIndices[i];
// see below
// t := min{t_i : i in F}
Scalar t = SetOfT[b].second;
// \delta Scalar := t - 0
Scalar dt = t ;
// examination of subsequent segments
// Walk the breakpoints in ascending order, fixing one variable at
// its bound per pass, until the model minimizer lies inside the
// current segment (dt_min < dt).
while ((dt_min >= dt) && (i < DIM)) {
if (d(b) > 0)
x_cauchy(b) = problem.upperBound()(b);
else if (d(b) < 0)
x_cauchy(b) = problem.lowerBound()(b);
// z_b = x_p^{cp} - x_b
Scalar zb = x_cauchy(b) - x(b);
// c := c +\delta t*p
c += dt * p;
// cache
VariableTVector wbt = W.row(b);
f_prime += dt * f_doubleprime + (Scalar) g(b) * g(b) + (Scalar) theta * g(b) * zb - (Scalar) g(b) *
wbt.transpose() * (M * c);
f_doubleprime += (Scalar) - 1.0 * theta * g(b) * g(b)
- (Scalar) 2.0 * (g(b) * (wbt.dot(M * p)))
- (Scalar) g(b) * g(b) * wbt.transpose() * (M * wbt);
// keep the curvature bounded away from zero (relative to its
// initial value) to avoid division blow-ups below
f_doubleprime = std::max(std::numeric_limits<Scalar>::epsilon() * f_dp_orig, f_doubleprime);
p += g(b) * wbt.transpose();
d(b) = 0;
dt_min = -f_prime / f_doubleprime;
t_old = t;
++i;
if (i < DIM) {
b = sortedIndices[i];
t = SetOfT[b].second;
dt = t - t_old;
}
}
dt_min = std::max(dt_min, (Scalar)0.0);
t_old += dt_min;
// the remaining (still-free) coordinates move along d to the Cauchy point
#pragma omp parallel for
for (int ii = i; ii < x_cauchy.rows(); ii++) {
x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
}
c += dt_min * p;
}
/**
* @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
* @details Largest step length in [0, 1] along du that keeps every
* free variable inside its bounds, starting from x_cp.
*
* @param problem the bound-constrained problem (supplies bounds)
* @param x_cp the (generalized Cauchy) starting point
* @param du proposed step for the free variables (du(i) pairs with
* FreeVariables[i])
* @param FreeVariables indices of the variables not fixed at a bound
* @return the feasible step length alpha* <= 1
*/
Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector<int> &FreeVariables) {
Scalar alphastar = 1;
const unsigned int n = FreeVariables.size();
assert(du.rows() == n);
// shrink alphastar until each free coordinate stays inside its bound
for (unsigned int i = 0; i < n; i++) {
if (du(i) > 0) {
alphastar = std::min(alphastar, (problem.upperBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
} else {
alphastar = std::min(alphastar, (problem.lowerBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
}
}
return alphastar;
}
/**
* @brief solving the subspace problem over the free variables
* @details Minimizes the quadratic model with the variables that sit
* at a bound at the Cauchy point held fixed, then clips the step back
* into the box via findAlpha.
*
* @param problem the bound-constrained problem (supplies bounds)
* @param x_cauchy the generalized Cauchy point
* @param x current iterate
* @param c the vector accumulated by getGeneralizedCauchyPoint
* @param g gradient at x
* @param SubspaceMin output: the subspace minimizer
*/
void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g,
TVector &SubspaceMin) {
Scalar theta_inverse = 1 / theta;
// indices of variables not fixed at a bound at the Cauchy point
std::vector<int> FreeVariablesIndex;
for (int i = 0; i < x_cauchy.rows(); i++) {
if ((x_cauchy(i) != problem.upperBound()(i)) && (x_cauchy(i) != problem.lowerBound()(i))) {
FreeVariablesIndex.push_back(i);
}
}
const int FreeVarCount = FreeVariablesIndex.size();
// WZ = W^T restricted to the free-variable rows
MatrixType WZ = MatrixType::Zero(W.cols(), FreeVarCount);
for (int i = 0; i < FreeVarCount; i++)
WZ.col(i) = W.row(FreeVariablesIndex[i]);
// reduced gradient of the model at the Cauchy point
TVector rr = (g + theta * (x_cauchy - x) - W * (M * c));
// r=r(FreeVariables);
MatrixType r = MatrixType::Zero(FreeVarCount, 1);
for (int i = 0; i < FreeVarCount; i++)
r.row(i) = rr.row(FreeVariablesIndex[i]);
// STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
VariableTVector v = M * (WZ * r);
// STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
MatrixType N = theta_inverse * WZ * WZ.transpose();
// N = I - MN
N = MatrixType::Identity(N.rows(), N.rows()) - M * N;
// STEP: 5
// v = N^{-1}*v
v = N.lu().solve(v);
// STEP: 6
// HERE IS A MISTAKE IN THE ORIGINAL PAPER!
VariableTVector du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
// STEP: 7
// clip the unconstrained step so it stays inside the box
Scalar alpha_star = findAlpha(problem, x_cauchy, du, FreeVariablesIndex);
// STEP: 8
VariableTVector dStar = alpha_star * du;
SubspaceMin = x_cauchy;
for (int i = 0; i < FreeVarCount; i++) {
SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
}
}
public:
// Set the number of (s, y) correction pairs kept for the
// limited-memory approximation.
void setHistorySize(const int hs) { m_historySize = hs; }
// Minimize problem starting from x0; the solution is written back
// into x0.  Stops on the projected-gradient test below, on the
// base-class convergence criteria, on a callback returning false,
// or when successive function values differ by less than 1e-8.
void minimize(TProblem &problem, TVector &x0) {
DIM = x0.rows();
theta = 1.0;
W = MatrixType::Zero(DIM, 0);
M = MatrixType::Zero(0, 0);
MatrixType yHistory = MatrixType::Zero(DIM, 0);
MatrixType sHistory = MatrixType::Zero(DIM, 0);
TVector x = x0, g = x0;
Scalar f = problem.value(x);
problem.gradient(x, g);
// conv. crit.
// projected-gradient test: ||P(x - g) - x||_inf >= 1e-4 means
// "not converged yet", where P projects onto the box
auto noConvergence =
[&](TVector &x, TVector &g)->bool {
return (((x - g).cwiseMax(problem.lowerBound()).cwiseMin(problem.upperBound()) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
};
this->m_current.reset();
this->m_status = Status::Continue;
while (problem.callback(this->m_current, x) && noConvergence(x, g) && (this->m_status == Status::Continue)) {
Scalar f_old = f;
TVector x_old = x;
TVector g_old = g;
// STEP 2: compute the cauchy point
TVector CauchyPoint = TVector::Zero(DIM);
VariableTVector c = VariableTVector::Zero(W.cols());
getGeneralizedCauchyPoint(problem, x, g, CauchyPoint, c);
// STEP 3: compute a search direction d_k by the primal method for the sub-problem
TVector SubspaceMin;
SubspaceMinimization(problem, CauchyPoint, x, c, g, SubspaceMin);
// STEP 4: perform linesearch and STEP 5: compute gradient
Scalar alpha_init = 1.0;
const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init);
// update current guess and function information
x = x - rate*(x-SubspaceMin);
f = problem.value(x);
problem.gradient(x, g);
// prepare for next iteration
TVector newY = g - g_old;
TVector newS = x - x_old;
// STEP 6:
// accept the correction pair only if |s^T y| > 1e-7 * ||y||^2
// (curvature condition; keeps the middle matrix invertible)
Scalar test = newS.dot(newY);
test = (test < 0) ? -1.0 * test : test;
if (test > 1e-7 * newY.squaredNorm()) {
if (yHistory.cols() < m_historySize) {
yHistory.conservativeResize(DIM, yHistory.cols() + 1);
sHistory.conservativeResize(DIM, sHistory.cols() + 1);
} else {
// history full: shift left, dropping the oldest column
yHistory.leftCols(m_historySize - 1) = yHistory.rightCols(m_historySize - 1).eval();
sHistory.leftCols(m_historySize - 1) = sHistory.rightCols(m_historySize - 1).eval();
}
yHistory.rightCols(1) = newY;
sHistory.rightCols(1) = newS;
// STEP 7:
// rebuild theta, W and M of the compact representation
// B = theta*I - W*M*W^T from the updated history
theta = (Scalar)(newY.transpose() * newY) / (newY.transpose() * newS);
W = MatrixType::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
W << yHistory, (theta * sHistory);
MatrixType A = sHistory.transpose() * yHistory;
MatrixType L = A.template triangularView<Eigen::StrictlyLower>();
MatrixType MM(A.rows() + L.rows(), A.rows() + L.cols());
MatrixType D = -1 * A.diagonal().asDiagonal();
MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
M = MM.inverse();
}
if (fabs(f_old - f) < 1e-8) {
// successive function values too similar
break;
}
++this->m_current.iterations;
this->m_current.gradNorm = g.norm();
this->m_status = checkConvergence(this->m_stop, this->m_current);
}
x0 = x;
if (this->m_debug > DebugLevel::None) {
std::cout << "Stop status was: " << this->m_status << std::endl;
std::cout << "Stop criteria were: " << std::endl << this->m_stop << std::endl;
std::cout << "Current values are: " << std::endl << this->m_current << std::endl;
}
}
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H */
|
MathTools.h | /**
* \file
* \copyright
* Copyright (c) 2012-2020, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*/
#pragma once
#include <Eigen/Eigen>
#include <cstddef>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace MathLib
{
template <typename T, std::size_t DIM> class TemplatePoint;
using Point3d = MathLib::TemplatePoint<double, 3>;
/**
* standard inner product in R^N
* \param v0 array of type T representing the vector
* \param v1 array of type T representing the vector
* */
template<typename T, int N> inline
T scalarProduct(T const * const v0, T const * const v1)
{
T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
for (int k = 1; k < N; k++)
{
res += v0[k] * v1[k];
}
return res;
}
// Specialization for 3-vectors of double: fully unrolled.  The
// additions are performed left to right, exactly as the generic loop
// would, so the floating-point result is identical.
template <> inline
double scalarProduct<double,3>(double const * const v0, double const * const v1)
{
    double sum = v0[0] * v1[0];
    sum += v0[1] * v1[1];
    sum += v0[2] * v1[2];
    return sum;
}
/**
 * Standard inner product in R^n for a run-time length n.
 *
 * \param v0 array of type T holding the first vector's n components
 * \param v1 array of type T holding the second vector's n components
 * \param n number of components in each vector
 * \return \f$\sum_{k=0}^{n-1} v_0[k] \, v_1[k]\f$, or T(0) when n <= 0
 */
template <typename T>
inline T scalarProduct(T const* const v0, T const* const v1, int const n)
{
    // Guard against an empty range: the unguarded version read v0[0]
    // unconditionally, which is out of bounds when n == 0.
    if (n <= 0)
    {
        return T(0);
    }
    T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
    for (int k = 1; k < n; k++)
    {
        res += v0[k] * v1[k];
    }
    return res;
}
/**
* calcProjPntToLineAndDists computes the orthogonal projection
* of a point p to the line described by the points a and b,
* \f$g(\lambda) = a + \lambda (b - a)\f$,
* the distance between p and the projected point
* and the distances between the projected point and the end
* points pa, pb of the line
* \param pp the (mesh) point
* \param pa first point of line
* \param pb second point of line
* \param lambda the projected point described by the line equation above
* \param d0 distance to the line point a
* \returns the distance between pp and the orthogonal projection of pp
*/
double calcProjPntToLineAndDists(MathLib::Point3d const& pp,
MathLib::Point3d const& pa,
MathLib::Point3d const& pb, double& lambda,
double& d0);
/**
* Let \f$p_0, p_1, p_2 \in R^3\f$. The function getAngle
* computes the angle between the edges \f$(p_0,p_1)\f$ and \f$(p_1,p_2)\f$
* @param p0 start point of edge 0
* @param p1 end point of edge 0 and start point of edge 1
* @param p2 end point of edge 1
* @return the angle between the edges
*/
double getAngle(Point3d const& p0, Point3d const& p1, Point3d const& p2);
/// Calculates the scalar triple (u x v) . w
double scalarTriple(Eigen::Vector3d const& u, Eigen::Vector3d const& v,
Eigen::Vector3d const& w);
} // namespace MathLib
|
c-decl.c | /* Process declarations and variables for C compiler.
Copyright (C) 1988-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#define INCLUDE_STRING
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "intl.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "toplev.h"
#include "debug.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "dumpfile.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "builtins.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "asan.h"
#include "c-family/name-hint.h"
#include "c-family/known-headers.h"
#include "c-family/c-spellcheck.h"
#include "tree-pretty-print.h"
/* In grokdeclarator, distinguish syntactic contexts of declarators. */
enum decl_context
{ NORMAL, /* Ordinary declaration */
FUNCDEF, /* Function definition */
PARM, /* Declaration of parm before function body */
FIELD, /* Declaration inside struct or union */
TYPENAME}; /* Typename (inside cast or sizeof) */
/* States indicating how grokdeclarator() should handle declspecs marked
with __attribute__((deprecated)). An object declared as
__attribute__((deprecated)) suppresses warnings of uses of other
deprecated items. */
enum deprecated_states {
DEPRECATED_NORMAL,
DEPRECATED_SUPPRESS
};
/* Nonzero if we have seen an invalid cross reference
to a struct, union, or enum, but not yet printed the message. */
tree pending_invalid_xref;
/* File and line to appear in the eventual error message. */
location_t pending_invalid_xref_location;
/* The file and line that the prototype came from if this is an
old-style definition; used for diagnostics in
store_parm_decls_oldstyle. */
static location_t current_function_prototype_locus;
/* Whether this prototype was built-in. */
static bool current_function_prototype_built_in;
/* The argument type information of this prototype. */
static tree current_function_prototype_arg_types;
/* The argument information structure for the function currently being
defined. */
static struct c_arg_info *current_function_arg_info;
/* The obstack on which parser and related data structures, which are
not live beyond their top-level declaration or definition, are
allocated. */
struct obstack parser_obstack;
/* The current statement tree. */
static GTY(()) struct stmt_tree_s c_stmt_tree;
/* Zero if we are not in an iteration or switch statement, otherwise
a bitmask. See bitmask definitions in c-tree.h. */
unsigned char in_statement;
/* A list of decls to be made automatically visible in each file scope. */
static GTY(()) tree visible_builtins;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
int current_function_returns_abnormally;
/* Set to nonzero by `grokdeclarator' for a function
whose return type is defaulted, if warnings for this are desired. */
static int warn_about_return_type;
/* Nonzero when the current toplevel function contains a declaration
of a nested function which is never defined. */
static bool undef_nested_function;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int current_omp_declare_target_attribute;
/* Each c_binding structure describes one binding of an identifier to
a decl. All the decls in a scope - irrespective of namespace - are
chained together by the ->prev field, which (as the name implies)
runs in reverse order. All the decls in a given namespace bound to
a given identifier are chained by the ->shadowed field, which runs
from inner to outer scopes.
The ->decl field usually points to a DECL node, but there are two
exceptions. In the namespace of type tags, the bound entity is a
RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared
identifier is encountered, it is bound to error_mark_node to
suppress further errors about that identifier in the current
function.
The ->u.type field stores the type of the declaration in this scope;
if NULL, the type is the type of the ->decl field. This is only of
relevance for objects with external or internal linkage which may
be redeclared in inner scopes, forming composite types that only
persist for the duration of those scopes. In the external scope,
this stores the composite of all the types declared for this
object, visible or not. The ->inner_comp field (used only at file
scope) stores whether an incomplete array type at file scope was
completed at an inner scope to an array size other than 1.
The ->u.label field is used for labels. It points to a structure
which stores additional information used for warnings.
The depth field is copied from the scope structure that holds this
decl. It is used to preserve the proper ordering of the ->shadowed
field (see bind()) and also for a handful of special-case checks.
Finally, the invisible bit is true for a decl which should be
ignored for purposes of normal name lookup, and the nested bit is
true for a decl that's been bound a second time in an inner scope;
in all such cases, the binding in the outer scope will have its
invisible bit true. */
struct GTY((chain_next ("%h.prev"))) c_binding {
union GTY(()) { /* first so GTY desc can use decl */
tree GTY((tag ("0"))) type; /* the type in this scope */
struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
} GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
tree decl; /* the decl bound */
tree id; /* the identifier it's bound to */
struct c_binding *prev; /* the previous decl in this scope */
struct c_binding *shadowed; /* the innermost decl shadowed by this one */
unsigned int depth : 28; /* depth of this scope */
BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */
BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */
BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */
BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */
location_t locus; /* location for nested bindings */
};
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)
/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different
namespaces defined by the language. */
struct GTY(()) lang_identifier {
struct c_common_identifier common_id;
struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
struct c_binding *tag_binding; /* struct/union/enum tags */
struct c_binding *label_binding; /* labels */
};
/* Validate c-lang.c's assumptions. */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];
/* The binding oracle; see c-tree.h. */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's symbol binding. */
#define I_SYMBOL_CHECKED(node) \
(TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's symbol binding slot (the ordinary
   namespace: variables, functions, constants, typedefs).  If the slot
   is currently empty and a binding oracle is installed, consult it
   once; I_SYMBOL_CHECKED records that we already asked, both to avoid
   repeated queries and to break recursion when the oracle calls back
   into gcc.  The return type is spelled "struct c_binding **" for
   consistency with i_tag_binding and i_label_binding below.  */
static inline struct c_binding **
i_symbol_binding (tree node)
{
  struct lang_identifier *lid
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (lid->symbol_binding == NULL
      && c_binding_oracle != NULL
      && !I_SYMBOL_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_SYMBOL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_SYMBOL, node);
    }
  return &lid->symbol_binding;
}
#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))
#define I_SYMBOL_DECL(node) \
(I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's tag binding. */
#define I_TAG_CHECKED(node) \
(TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's tag binding slot (the struct/union/enum
   tag namespace), consulting the binding oracle at most once when the
   slot is empty.  */
static inline struct c_binding **
i_tag_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
  bool ask_oracle = (id->tag_binding == NULL
		     && c_binding_oracle != NULL
		     && !I_TAG_CHECKED (node));

  if (ask_oracle)
    {
      /* Mark the identifier as checked before calling out, so that a
	 re-entrant call from the oracle back into gcc cannot recurse
	 forever.  */
      I_TAG_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_TAG, node);
    }
  return &id->tag_binding;
}
#define I_TAG_BINDING(node) (*i_tag_binding (node))
#define I_TAG_DECL(node) \
(I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's label binding. */
#define I_LABEL_CHECKED(node) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's label binding slot, consulting the
   binding oracle at most once when the slot is empty.  */
static inline struct c_binding **
i_label_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  /* Nothing to do when a binding already exists, no oracle is
     installed, or we have asked the oracle before.  */
  if (id->label_binding != NULL
      || c_binding_oracle == NULL
      || I_LABEL_CHECKED (node))
    return &id->label_binding;

  /* Set the "checked" flag before the call, so the oracle calling
     back into gcc does not recurse infinitely.  */
  I_LABEL_CHECKED (node) = 1;
  c_binding_oracle (C_ORACLE_LABEL, node);
  return &id->label_binding;
}
#define I_LABEL_BINDING(node) (*i_label_binding (node))
#define I_LABEL_DECL(node) \
(I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)
/* The resulting tree type. */
union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
{
union tree_node GTY ((tag ("0"),
desc ("tree_node_structure (&%h)")))
generic;
struct lang_identifier GTY ((tag ("1"))) identifier;
};
/* Track bindings and other things that matter for goto warnings. For
efficiency, we do not gather all the decls at the point of
definition. Instead, we point into the bindings structure. As
scopes are popped, we update these structures and gather the decls
that matter at that time. */
struct GTY(()) c_spot_bindings {
/* The currently open scope which holds bindings defined when the
label was defined or the goto statement was found. */
struct c_scope *scope;
/* The bindings in the scope field which were defined at the point
of the label or goto. This lets us look at older or newer
bindings in the scope, as appropriate. */
struct c_binding *bindings_in_scope;
/* The number of statement expressions that have started since this
label or goto statement was defined. This is zero if we are at
the same statement expression level. It is positive if we are in
a statement expression started since this spot. It is negative
if this spot was in a statement expression and we have left
it. */
int stmt_exprs;
/* Whether we started in a statement expression but are no longer in
it. This is set to true if stmt_exprs ever goes negative. */
bool left_stmt_expr;
};
/* This structure is used to keep track of bindings seen when a goto
statement is defined. This is only used if we see the goto
statement before we see the label. */
struct GTY(()) c_goto_bindings {
/* The location of the goto statement. */
location_t loc;
/* The bindings of the goto statement. */
struct c_spot_bindings goto_bindings;
};
typedef struct c_goto_bindings *c_goto_bindings_p;
/* The additional information we keep track of for a label binding.
These fields are updated as scopes are popped. */
struct GTY(()) c_label_vars {
/* The shadowed c_label_vars, when one label shadows another (which
can only happen using a __label__ declaration). */
struct c_label_vars *shadowed;
/* The bindings when the label was defined. */
struct c_spot_bindings label_bindings;
/* A list of decls that we care about: decls about which we should
warn if a goto branches to this label from later in the function.
Decls are added to this list as scopes are popped. We only add
the decls that matter. */
vec<tree, va_gc> *decls_in_scope;
/* A list of goto statements to this label. This is only used for
goto statements seen before the label was defined, so that we can
issue appropriate warnings for them. */
vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
scope. Four scopes are distinguished specially: the innermost or
current scope, the innermost function scope, the file scope (always
the second to outermost) and the outermost or external scope.
Most declarations are recorded in the current scope.
All normal label declarations are recorded in the innermost
function scope, as are bindings of undeclared identifiers to
error_mark_node. (GCC permits nested functions as an extension,
hence the 'innermost' qualifier.) Explicitly declared labels
(using the __label__ extension) appear in the current scope.
Being in the file scope (current_scope == file_scope) causes
special behavior in several places below. Also, under some
conditions the Objective-C front end records declarations in the
file scope even though that isn't the current scope.
All declarations with external linkage are recorded in the external
scope, even if they aren't visible there; this models the fact that
such declarations are visible to the entire program, and (with a
bit of cleverness, see pushdecl) allows diagnosis of some violations
of C99 6.2.2p7 and 6.2.7p2:
If, within the same translation unit, the same identifier appears
with both internal and external linkage, the behavior is
undefined.
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.
Initially only the built-in declarations, which describe compiler
intrinsic functions plus a subset of the standard library, are in
this scope.
The order of the blocks list matters, and it is frequently appended
to. To avoid having to walk all the way to the end of the list on
each insertion, or reverse the list later, we maintain a pointer to
the last list entry. (FIXME: It should be feasible to use a reversed
list here.)
The bindings list is strictly in reverse order of declarations;
pop_scope relies on this. */
struct GTY((chain_next ("%h.outer"))) c_scope {
/* The scope containing this one. */
struct c_scope *outer;
/* The next outermost function scope. */
struct c_scope *outer_function;
/* All bindings in this scope. */
struct c_binding *bindings;
/* For each scope (except the global one), a chain of BLOCK nodes
for all the scopes that were entered and exited one level down. */
tree blocks;
tree blocks_last;
/* The depth of this scope. Used to keep the ->shadowed chain of
bindings sorted innermost to outermost. */
unsigned int depth : 28;
/* True if we are currently filling this scope with parameter
declarations. */
BOOL_BITFIELD parm_flag : 1;
/* True if we saw [*] in this scope. Used to give an error messages
if these appears in a function definition. */
BOOL_BITFIELD had_vla_unspec : 1;
/* True if we already complained about forward parameter decls
in this scope. This prevents double warnings on
foo (int a; int b; ...) */
BOOL_BITFIELD warned_forward_parm_decls : 1;
/* True if this is the outermost block scope of a function body.
This scope contains the parameters, the local variables declared
in the outermost block, and all the labels (except those in
nested functions, or declared at block scope with __label__). */
BOOL_BITFIELD function_body : 1;
/* True means make a BLOCK for this scope no matter what. */
BOOL_BITFIELD keep : 1;
/* True means that an unsuffixed float constant is _Decimal64. */
BOOL_BITFIELD float_const_decimal64 : 1;
/* True if this scope has any label bindings. This is used to speed
up searching for labels when popping scopes, particularly since
labels are normally only found at function scope. */
BOOL_BITFIELD has_label_bindings : 1;
/* True if we should issue a warning if a goto statement crosses any
of the bindings. We still need to check the list of bindings to
find the specific ones we need to warn about. This is true if
decl_jump_unsafe would return true for any of the bindings. This
is used to avoid looping over all the bindings unnecessarily. */
BOOL_BITFIELD has_jump_unsafe_decl : 1;
};
/* The scope currently in effect. */
static GTY(()) struct c_scope *current_scope;
/* The innermost function scope. Ordinary (not explicitly declared)
labels, bindings to error_mark_node, and the lazily-created
bindings of __func__ and its friends get this scope. */
static GTY(()) struct c_scope *current_function_scope;
/* The C file scope. This is reset for each input translation unit. */
static GTY(()) struct c_scope *file_scope;
/* The outermost scope. This is used for all declarations with
external linkage, and only these, hence the name. */
static GTY(()) struct c_scope *external_scope;
/* A chain of c_scope structures awaiting reuse. */
static GTY((deletable)) struct c_scope *scope_freelist;
/* A chain of c_binding structures awaiting reuse. */
static GTY((deletable)) struct c_binding *binding_freelist;
/* Append VAR to LIST in scope SCOPE. */
#define SCOPE_LIST_APPEND(scope, list, decl) do { \
struct c_scope *s_ = (scope); \
tree d_ = (decl); \
if (s_->list##_last) \
BLOCK_CHAIN (s_->list##_last) = d_; \
else \
s_->list = d_; \
s_->list##_last = d_; \
} while (0)
/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \
struct c_scope *t_ = (tscope); \
struct c_scope *f_ = (fscope); \
if (t_->to##_last) \
BLOCK_CHAIN (t_->to##_last) = f_->from; \
else \
t_->to = f_->from; \
t_->to##_last = f_->from##_last; \
} while (0)
/* A c_inline_static structure stores details of a static identifier
referenced in a definition of a function that may be an inline
definition if no subsequent declaration of that function uses
"extern" or does not use "inline". */
struct GTY((chain_next ("%h.next"))) c_inline_static {
/* The location for a diagnostic. */
location_t location;
/* The function that may be an inline definition. */
tree function;
/* The object or function referenced. */
tree static_decl;
/* What sort of reference this is. */
enum c_inline_static_type type;
/* The next such structure or NULL. */
struct c_inline_static *next;
};
/* List of static identifiers used or referenced in functions that may
be inline definitions. */
static GTY(()) struct c_inline_static *c_inline_statics;
/* True means unconditionally make a BLOCK for the next scope pushed. */
static bool keep_next_level_flag;
/* True means the next call to push_scope will be the outermost scope
of a function body, so do not push a new scope, merely cease
expecting parameter decls. */
static bool next_is_function_body;
/* A vector of pointers to c_binding structures. */
typedef struct c_binding *c_binding_ptr;
/* Information that we keep for a struct or union while it is being
parsed. */
class c_struct_parse_info
{
public:
/* If warn_cxx_compat, a list of types defined within this
struct. */
auto_vec<tree> struct_types;
/* If warn_cxx_compat, a list of field names which have bindings,
and which are defined in this struct, but which are not defined
in any enclosing struct. This is used to clear the in_struct
field of the c_bindings structure. */
auto_vec<c_binding_ptr> fields;
/* If warn_cxx_compat, a list of typedef names used when defining
fields in this struct. */
auto_vec<tree> typedefs_seen;
};
/* Information for the struct or union currently being parsed, or
   NULL if not parsing a struct or union.  */
static class c_struct_parse_info *struct_parse_info;

/* Forward declarations for functions defined later in this file.  */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
			    struct c_declspecs *,
			    enum decl_context, bool, tree *, tree *, tree *,
			    bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);
static const char *header_for_builtin_fn (tree);
/* T is a statement.  Add it to the statement-tree.  This is the
   C/ObjC version--C++ has a slightly different version of this
   function.  Returns T.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  /* Stamp T with the current source location if it can carry one and
     does not have one already; labels keep their own locations.  */
  if (code != LABEL_EXPR
      && CAN_HAVE_LOCATION_P (t)
      && !EXPR_HAS_LOCATION (t))
    SET_EXPR_LOCATION (t, input_location);

  if (code == CASE_LABEL_EXPR || code == LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  if (!building_stmt_list_p ())
    push_stmt_list ();
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}
/* Build a pointer type using the default pointer mode.  */

static tree
c_build_pointer_type (tree to_type)
{
  /* error_mark_node has no meaningful address space; treat it as
     generic.  */
  addr_space_t as = (to_type == error_mark_node
		     ? ADDR_SPACE_GENERIC
		     : TYPE_ADDR_SPACE (to_type));
  machine_mode mode;

  /* Honor a -mpointer-mode style override only for the generic address
     space; named address spaces always get the target's pointer mode.  */
  if (as == ADDR_SPACE_GENERIC && c_default_pointer_mode != VOIDmode)
    mode = c_default_pointer_mode;
  else
    mode = targetm.addr_space.pointer_mode (as);

  return build_pointer_type_for_mode (to_type, mode, false);
}
/* Return true if we will want to say something if a goto statement
   crosses DECL.  */

static bool
decl_jump_unsafe (tree decl)
{
  if (error_operand_p (decl))
    return false;

  bool is_var = VAR_P (decl);

  /* Don't warn for compound literals.  If a goto statement crosses
     their initialization, it should cross also all the places where
     the complit is used or where the complit address might be saved
     into some variable, so code after the label to which goto jumps
     should not be able to refer to the compound literal.  */
  if (is_var && C_DECL_COMPOUND_LITERAL_P (decl))
    return false;

  /* Always warn about crossing variably modified types.  */
  if ((is_var || TREE_CODE (decl) == TYPE_DECL)
      && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    return true;

  /* Otherwise, only warn if -Wjump-misses-init and this is an
     initialized automatic decl.  */
  return (warn_jump_misses_init
	  && is_var
	  && !TREE_STATIC (decl)
	  && DECL_INITIAL (decl) != NULL_TREE);
}
/* Dump the bindings hanging off IDENTIFIER_NODE NODE to FILE,
   indented by INDENT columns.  Used from the tree-dumping debug
   routines.  */

void
c_print_identifier (FILE *file, tree node, int indent)
{
  /* Temporarily hide any binding oracle.  Without this, calls to
     debug_tree from the debugger will end up calling into the oracle,
     making for a confusing debug session.  As the oracle isn't needed
     here for normal operation, it's simplest to suppress it.  */
  void (*saved_oracle) (enum c_oracle_request, tree identifier)
    = c_binding_oracle;
  c_binding_oracle = NULL;

  print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
  print_node (file, "tag", I_TAG_DECL (node), indent + 4);
  print_node (file, "label", I_LABEL_DECL (node), indent + 4);

  if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
    {
      tree rid = ridpointers[C_RID_CODE (node)];
      indent_to (file, indent + 4);
      fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
	       (void *) rid, IDENTIFIER_POINTER (rid));
    }

  c_binding_oracle = saved_oracle;
}
/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
   which may be any of several kinds of DECL or TYPE or error_mark_node,
   in the scope SCOPE.  INVISIBLE and NESTED are recorded on the new
   binding; LOCUS is the source location to associate with it.  */
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
      bool nested, location_t locus)
{
  struct c_binding *b, **here;

  /* Reuse a recycled binding structure if one is available, otherwise
     GC-allocate a fresh one.  */
  if (binding_freelist)
    {
      b = binding_freelist;
      binding_freelist = b->prev;
    }
  else
    b = ggc_alloc<c_binding> ();

  b->shadowed = 0;
  b->decl = decl;
  b->id = name;
  b->depth = scope->depth;
  b->invisible = invisible;
  b->nested = nested;
  b->inner_comp = 0;
  b->in_struct = 0;
  b->locus = locus;

  b->u.type = NULL;

  /* Push onto the front of the scope's binding list.  */
  b->prev = scope->bindings;
  scope->bindings = b;

  if (decl_jump_unsafe (decl))
    scope->has_jump_unsafe_decl = 1;

  /* Anonymous bindings (e.g. untagged types) are not entered in any
     identifier's namespace chains.  */
  if (!name)
    return;

  /* Pick the identifier namespace chain (label, tag, or ordinary
     symbol) appropriate to what DECL is.  */
  switch (TREE_CODE (decl))
    {
    case LABEL_DECL:     here = &I_LABEL_BINDING (name);   break;
    case ENUMERAL_TYPE:
    case UNION_TYPE:
    case RECORD_TYPE:    here = &I_TAG_BINDING (name);     break;
    case VAR_DECL:
    case FUNCTION_DECL:
    case TYPE_DECL:
    case CONST_DECL:
    case PARM_DECL:
    case ERROR_MARK:     here = &I_SYMBOL_BINDING (name);  break;

    default:
      gcc_unreachable ();
    }

  /* Locate the appropriate place in the chain of shadowed decls
     to insert this binding.  Normally, scope == current_scope and
     this does nothing.  */
  while (*here && (*here)->depth > scope->depth)
    here = &(*here)->shadowed;

  b->shadowed = *here;
  *here = b;
}
/* Clear the binding structure B, stick it on the binding_freelist,
   and return the former value of b->prev.  This is used by pop_scope
   and get_parm_info to iterate destructively over all the bindings
   from a given scope.  */

static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
  struct c_binding *next = b->prev;

  /* Wipe the structure before recycling it onto the freelist.  */
  memset (b, 0, sizeof (struct c_binding));
  b->prev = binding_freelist;
  binding_freelist = b;

  return next;
}
/* Bind a label.  Like bind, but skip fields which aren't used for
   labels, and add the LABEL_VARS value.  */

static void
bind_label (tree name, tree label, struct c_scope *scope,
	    struct c_label_vars *label_vars)
{
  bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);

  scope->has_label_bindings = true;

  /* bind pushed the new binding onto the front of SCOPE's list;
     hang LABEL_VARS off it, saving whatever it shadows.  */
  struct c_binding *b = scope->bindings;
  gcc_assert (b->decl == label);
  label_vars->shadowed = b->u.label;
  b->u.label = label_vars;
}
/* Hook called at end of compilation to assume 1 elt
   for a file-scope tentative array defn that wasn't complete before.  */

void
c_finish_incomplete_decl (tree decl)
{
  if (!VAR_P (decl))
    return;

  tree type = TREE_TYPE (decl);

  /* Only a non-external array whose bound is still unknown needs
     completing.  */
  if (type == error_mark_node
      || TREE_CODE (type) != ARRAY_TYPE
      || DECL_EXTERNAL (decl)
      || TYPE_DOMAIN (type) != NULL_TREE)
    return;

  warning_at (DECL_SOURCE_LOCATION (decl),
	      0, "array %q+D assumed to have one element", decl);

  complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);

  relayout_decl (decl);
}
/* Record that inline function FUNC contains a reference (location
   LOC) to static DECL (file-scope or function-local according to
   TYPE).  */

void
record_inline_static (location_t loc, tree func, tree decl,
		      enum c_inline_static_type type)
{
  c_inline_static *entry = ggc_alloc<c_inline_static> ();

  entry->location = loc;
  entry->function = func;
  entry->static_decl = decl;
  entry->type = type;

  /* Push onto the front of the global list; check_inline_statics
     walks it at end of translation.  */
  entry->next = c_inline_statics;
  c_inline_statics = entry;
}
/* Check for references to static declarations in inline functions at
the end of the translation unit and diagnose them if the functions
are still inline definitions. */
static void
check_inline_statics (void)
{
struct c_inline_static *csi;
for (csi = c_inline_statics; csi; csi = csi->next)
{
if (DECL_EXTERNAL (csi->function))
switch (csi->type)
{
case csi_internal:
pedwarn (csi->location, 0,
"%qD is static but used in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
case csi_modifiable:
pedwarn (csi->location, 0,
"%q+D is static but declared in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
default:
gcc_unreachable ();
}
}
c_inline_statics = NULL;
}
/* Fill in a c_spot_bindings structure.  If DEFINING is true, set it
   for the current state, otherwise set it to uninitialized.  */

static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
  if (defining)
    {
      /* Snapshot the scope and its current binding list head.  */
      p->scope = current_scope;
      p->bindings_in_scope = current_scope->bindings;
    }
  else
    {
      p->scope = NULL;
      p->bindings_in_scope = NULL;
    }

  p->stmt_exprs = 0;
  p->left_stmt_expr = false;
}
/* Update spot bindings P as we pop out of SCOPE.  Return true if we
   should push decls for a label.  */

static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
  /* This label or goto is defined in some other scope, or it is a
     label which is not yet defined.  There is nothing to
     update.  */
  if (p->scope != scope)
    return false;

  /* Adjust the spot bindings to refer to the bindings already defined
     in the enclosing scope.  */
  p->scope = scope->outer;
  p->bindings_in_scope = p->scope->bindings;

  return true;
}
/* The Objective-C front-end often needs to determine the current scope.
   Returned as void * so c-tree internals need not be exposed.  */
void *
objc_get_current_scope (void)
{
  return current_scope;
}
/* The following function is used only by Objective-C.  It needs to live here
   because it accesses the innards of c_scope.  Volatilize every binding
   from the current scope outward, stopping at ENCLOSING_BLK or at the
   function body scope, whichever comes first.  */

void
objc_mark_locals_volatile (void *enclosing_blk)
{
  for (struct c_scope *scope = current_scope;
       scope && scope != enclosing_blk;
       scope = scope->outer)
    {
      for (struct c_binding *b = scope->bindings; b; b = b->prev)
	objc_volatilize_decl (b->decl);

      /* Do not climb up past the current function.  */
      if (scope->function_body)
	break;
    }
}
/* Return true if we are in the global binding level, i.e. the current
   scope is the translation unit's file scope.  */
bool
global_bindings_p (void)
{
  return current_scope == file_scope;
}
/* Return true if we're declaring parameters in an old-style function
   declaration.  */
bool
old_style_parameter_scope (void)
{
  /* If processing parameters and there is no function statement list, we
   * have an old-style function declaration.  */
  return (current_scope->parm_flag && !DECL_SAVED_TREE (current_function_decl));
}
/* Arrange for the next scope pushed to get a BLOCK node even if it
   would otherwise not (see keep_next_level_flag above).  */
void
keep_next_level (void)
{
  keep_next_level_flag = true;
}

/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON.  */

void
set_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = true;
}

/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma.  */

void
clear_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = false;
}

/* Return nonzero if an unsuffixed float constant is _Decimal64.  */

bool
float_const_decimal64_p (void)
{
  return current_scope->float_const_decimal64;
}

/* Identify this scope as currently being filled with parameters.  */

void
declare_parm_level (void)
{
  current_scope->parm_flag = true;
}
/* Enter a new scope.  If the previous call to store_parm_decls flagged
   next_is_function_body, the parameter scope is converted in place into
   the function-body scope instead of pushing a new one.  */
void
push_scope (void)
{
  if (next_is_function_body)
    {
      /* This is the transition from the parameters to the top level
	 of the function body.  These are the same scope
	 (C99 6.2.1p4,6) so we do not push another scope structure.
	 next_is_function_body is set only by store_parm_decls, which
	 in turn is called when and only when we are about to
	 encounter the opening curly brace for the function body.

	 The outermost block of a function always gets a BLOCK node,
	 because the debugging output routines expect that each
	 function has at least one BLOCK.  */
      current_scope->parm_flag         = false;
      current_scope->function_body     = true;
      current_scope->keep              = true;
      current_scope->outer_function    = current_function_scope;
      current_function_scope           = current_scope;

      keep_next_level_flag = false;
      next_is_function_body = false;

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope->outer)
	current_scope->float_const_decimal64
	  = current_scope->outer->float_const_decimal64;
      else
	current_scope->float_const_decimal64 = false;
    }
  else
    {
      struct c_scope *scope;

      /* Reuse a recycled scope structure if one is available.  */
      if (scope_freelist)
	{
	  scope = scope_freelist;
	  scope_freelist = scope->outer;
	}
      else
	scope = ggc_cleared_alloc<c_scope> ();

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope)
	scope->float_const_decimal64 = current_scope->float_const_decimal64;
      else
	scope->float_const_decimal64 = false;

      scope->keep          = keep_next_level_flag;
      scope->outer         = current_scope;
      scope->depth	   = current_scope ? (current_scope->depth + 1) : 0;

      /* Check for scope depth overflow.  Unlikely (2^28 == 268,435,456) but
	 possible.  */
      if (current_scope && scope->depth == 0)
	{
	  scope->depth--;
	  sorry ("GCC supports only %u nested scopes", scope->depth);
	}

      current_scope        = scope;
      keep_next_level_flag = false;
    }
}
/* This is called when we are leaving SCOPE.  For each label defined
   in SCOPE, add any appropriate decls to its decls_in_scope fields.
   These are the decls whose initialization will be skipped by a goto
   later in the function.  */

static void
update_label_decls (struct c_scope *scope)
{
  struct c_scope *s;

  s = scope;
  while (s != NULL)
    {
      if (s->has_label_bindings)
	{
	  struct c_binding *b;

	  for (b = s->bindings; b != NULL; b = b->prev)
	    {
	      struct c_label_vars *label_vars;
	      struct c_binding *b1;
	      bool hjud;
	      unsigned int ix;
	      struct c_goto_bindings *g;

	      if (TREE_CODE (b->decl) != LABEL_DECL)
		continue;
	      label_vars = b->u.label;

	      /* Capture the label's recorded binding list head and the
		 jump-unsafe flag before update_spot_bindings rewrites
		 the spot bindings below.  */
	      b1 = label_vars->label_bindings.bindings_in_scope;
	      if (label_vars->label_bindings.scope == NULL)
		hjud = false;
	      else
		hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
	      if (update_spot_bindings (scope, &label_vars->label_bindings))
		{
		  /* This label is defined in this scope.  */
		  if (hjud)
		    {
		      for (; b1 != NULL; b1 = b1->prev)
			{
			  /* A goto from later in the function to this
			     label will never see the initialization
			     of B1, if any.  Save it to issue a
			     warning if needed.  */
			  if (decl_jump_unsafe (b1->decl))
			    vec_safe_push(label_vars->decls_in_scope, b1->decl);
			}
		    }
		}

	      /* Update the bindings of any goto statements associated
		 with this label.  */
	      FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
		update_spot_bindings (scope, &g->goto_bindings);
	    }
	}

      /* Don't search beyond the current function.  */
      if (s == current_function_scope)
	break;

      s = s->outer;
    }
}
/* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT.  */

static void
set_type_context (tree type, tree context)
{
  /* Walk the variant chain starting at the main variant.  */
  tree v = TYPE_MAIN_VARIANT (type);
  while (v)
    {
      TYPE_CONTEXT (v) = context;
      v = TYPE_NEXT_VARIANT (v);
    }
}
/* Exit a scope.  Restore the state of the identifier-decl mappings
   that were in effect when this scope was entered.  Return a BLOCK
   node containing all the DECLs in this scope that are of interest
   to debug info generation.  */

tree
pop_scope (void)
{
  struct c_scope *scope = current_scope;
  tree block, context, p;
  struct c_binding *b;

  bool functionbody = scope->function_body;
  /* A BLOCK is kept for function bodies, explicitly-kept scopes, and
     any scope that actually has bindings.  */
  bool keep = functionbody || scope->keep || scope->bindings;

  update_label_decls (scope);

  /* If appropriate, create a BLOCK to record the decls for the life
     of this function.  */
  block = NULL_TREE;
  if (keep)
    {
      block = make_node (BLOCK);
      BLOCK_SUBBLOCKS (block) = scope->blocks;
      TREE_USED (block) = 1;

      /* In each subblock, record that this is its superior.  */
      for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
	BLOCK_SUPERCONTEXT (p) = block;

      BLOCK_VARS (block) = NULL_TREE;
    }

  /* The TYPE_CONTEXTs for all of the tagged types belonging to this
     scope must be set so that they point to the appropriate
     construct, i.e. either to the current FUNCTION_DECL node, or
     else to the BLOCK node we just constructed.

     Note that for tagged types whose scope is just the formal
     parameter list for some function type specification, we can't
     properly set their TYPE_CONTEXTs here, because we don't have a
     pointer to the appropriate FUNCTION_TYPE node readily available
     to us.  For those cases, the TYPE_CONTEXTs of the relevant tagged
     type nodes get set in `grokdeclarator' as soon as we have created
     the FUNCTION_TYPE node which will represent the "scope" for these
     "parameter list local" tagged types.  */
  if (scope->function_body)
    context = current_function_decl;
  else if (scope == file_scope)
    {
      tree file_decl
	= build_translation_unit_decl (get_identifier (main_input_filename));
      context = file_decl;
      debug_hooks->register_main_translation_unit (file_decl);
    }
  else
    context = block;

  /* Clear all bindings in this scope.  */
  for (b = scope->bindings; b; b = free_binding_and_advance (b))
    {
      p = b->decl;
      switch (TREE_CODE (p))
	{
	case LABEL_DECL:
	  /* Warnings for unused labels, errors for undefined labels.  */
	  if (TREE_USED (p) && !DECL_INITIAL (p))
	    {
	      error ("label %q+D used but not defined", p);
	      DECL_INITIAL (p) = error_mark_node;
	    }
	  else
	    warn_for_unused_label (p);

	  /* Labels go in BLOCK_VARS.  */
	  DECL_CHAIN (p) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = p;
	  gcc_assert (I_LABEL_BINDING (b->id) == b);
	  I_LABEL_BINDING (b->id) = b->shadowed;

	  /* Also pop back to the shadowed label_vars.  */
	  release_tree_vector (b->u.label->decls_in_scope);
	  b->u.label = b->u.label->shadowed;
	  break;

	case ENUMERAL_TYPE:
	case UNION_TYPE:
	case RECORD_TYPE:
	  set_type_context (p, context);

	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }
	  break;

	case FUNCTION_DECL:
	  /* Propagate TREE_ADDRESSABLE from nested functions to their
	     containing functions.  */
	  if (!TREE_ASM_WRITTEN (p)
	      && DECL_INITIAL (p) != NULL_TREE
	      && TREE_ADDRESSABLE (p)
	      && DECL_ABSTRACT_ORIGIN (p) != NULL_TREE
	      && DECL_ABSTRACT_ORIGIN (p) != p)
	    TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
	  if (!TREE_PUBLIC (p)
	      && !DECL_INITIAL (p)
	      && !b->nested
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      error ("nested function %q+D declared but never defined", p);
	      undef_nested_function = true;
	    }
	  else if (DECL_DECLARED_INLINE_P (p)
		   && TREE_PUBLIC (p)
		   && !DECL_INITIAL (p))
	    {
	      /* C99 6.7.4p6: "a function with external linkage... declared
		 with an inline function specifier ... shall also be defined
		 in the same translation unit."  */
	      if (!flag_gnu89_inline
		  && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
		  && scope == external_scope)
		pedwarn (input_location, 0,
			 "inline function %q+D declared but never defined", p);
	      DECL_EXTERNAL (p) = 1;
	    }

	  goto common_symbol;

	case VAR_DECL:
	  /* Warnings for unused variables.  */
	  if ((!TREE_USED (p) || !DECL_READ_P (p))
	      && !TREE_NO_WARNING (p)
	      && !DECL_IN_SYSTEM_HEADER (p)
	      && DECL_NAME (p)
	      && !DECL_ARTIFICIAL (p)
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      if (!TREE_USED (p))
		warning (OPT_Wunused_variable, "unused variable %q+D", p);
	      else if (DECL_CONTEXT (p) == current_function_decl)
		warning_at (DECL_SOURCE_LOCATION (p),
			    OPT_Wunused_but_set_variable,
			    "variable %qD set but not used", p);
	    }

	  if (b->inner_comp)
	    {
	      error ("type of array %q+D completed incompatibly with"
		     " implicit initialization", p);
	    }

	  /* Fall through.  */
	case TYPE_DECL:
	case CONST_DECL:
	common_symbol:
	  /* All of these go in BLOCK_VARS, but only if this is the
	     binding in the home scope.  */
	  if (!b->nested)
	    {
	      DECL_CHAIN (p) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = p;
	    }
	  else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
	    {
	      /* For block local externs add a special
		 DECL_EXTERNAL decl for debug info generation.  */
	      tree extp = copy_node (p);

	      DECL_EXTERNAL (extp) = 1;
	      TREE_STATIC (extp) = 0;
	      TREE_PUBLIC (extp) = 1;
	      DECL_INITIAL (extp) = NULL_TREE;
	      DECL_LANG_SPECIFIC (extp) = NULL;
	      DECL_CONTEXT (extp) = current_function_decl;
	      if (TREE_CODE (p) == FUNCTION_DECL)
		{
		  DECL_RESULT (extp) = NULL_TREE;
		  DECL_SAVED_TREE (extp) = NULL_TREE;
		  DECL_STRUCT_FUNCTION (extp) = NULL;
		}
	      if (b->locus != UNKNOWN_LOCATION)
		DECL_SOURCE_LOCATION (extp) = b->locus;
	      DECL_CHAIN (extp) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = extp;
	    }
	  /* If this is the file scope set DECL_CONTEXT of each decl to
	     the TRANSLATION_UNIT_DECL.  This makes same_translation_unit_p
	     work.  */
	  if (scope == file_scope)
	    {
	      DECL_CONTEXT (p) = context;
	      if (TREE_CODE (p) == TYPE_DECL
		  && TREE_TYPE (p) != error_mark_node)
		set_type_context (TREE_TYPE (p), context);
	    }

	  gcc_fallthrough ();
	  /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
	     already been put there by store_parm_decls.  Unused-
	     parameter warnings are handled by function.c.
	     error_mark_node obviously does not go in BLOCK_VARS and
	     does not get unused-variable warnings.  */
	case PARM_DECL:
	case ERROR_MARK:
	  /* It is possible for a decl not to have a name.  We get
	     here with b->id NULL in this case.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	      if (b->shadowed && b->shadowed->u.type)
		TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }


  /* Dispose of the block that we just made inside some higher level.  */
  if ((scope->function_body || scope == file_scope) && context)
    {
      DECL_INITIAL (context) = block;
      BLOCK_SUPERCONTEXT (block) = context;
    }
  else if (scope->outer)
    {
      if (block)
	SCOPE_LIST_APPEND (scope->outer, blocks, block);
      /* If we did not make a block for the scope just exited, any
	 blocks made for inner scopes must be carried forward so they
	 will later become subblocks of something else.  */
      else if (scope->blocks)
	SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
    }

  /* Pop the current scope, and free the structure for reuse.  */
  current_scope = scope->outer;
  if (scope->function_body)
    current_function_scope = scope->outer_function;

  memset (scope, 0, sizeof (struct c_scope));
  scope->outer = scope_freelist;
  scope_freelist = scope;

  return block;
}
/* Enter the translation-unit (file) scope.  Idempotent: does nothing
   if the file scope already exists.  Also rebinds the visible
   builtins into the new scope.  */

void
push_file_scope (void)
{
  if (file_scope)
    return;

  push_scope ();
  file_scope = current_scope;

  start_fname_decls ();

  for (tree decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
    bind (DECL_NAME (decl), decl, file_scope,
	  /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}
/* Leave the translation-unit (file) scope at end of compilation,
   after first unwinding any scopes left open by missing close
   braces.  Handles the PCH write-out early exit.  */
void
pop_file_scope (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  while (current_scope != file_scope)
    pop_scope ();

  /* __FUNCTION__ is defined at file scope ("").  This
     call may not be necessary as my tests indicate it
     still works without it.  */
  finish_fname_decls ();

  check_inline_statics ();

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      c_common_write_pch ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  /* Pop off the file scope and close this translation unit.  */
  pop_scope ();
  file_scope = 0;

  maybe_apply_pending_pragma_weaks ();
}
/* Adjust the bindings for the start of a statement expression.  */

void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
  /* Bump the statement-expression counter on every label spot binding,
     and on every goto recorded against each label, in all enclosing
     scopes.  */
  for (struct c_scope *s = current_scope; s != NULL; s = s->outer)
    {
      if (!s->has_label_bindings)
	continue;

      for (struct c_binding *b = s->bindings; b != NULL; b = b->prev)
	{
	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;

	  struct c_label_vars *lv = b->u.label;
	  ++lv->label_bindings.stmt_exprs;

	  unsigned int ix;
	  struct c_goto_bindings *g;
	  FOR_EACH_VEC_SAFE_ELT (lv->gotos, ix, g)
	    ++g->goto_bindings.stmt_exprs;
	}
    }

  if (switch_bindings != NULL)
    ++switch_bindings->stmt_exprs;
}
/* Adjust the bindings for the end of a statement expression.  */

void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
  struct c_scope *scope;

  for (scope = current_scope; scope != NULL; scope = scope->outer)
    {
      struct c_binding *b;

      if (!scope->has_label_bindings)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  struct c_label_vars *label_vars;
	  unsigned int ix;
	  struct c_goto_bindings *g;

	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;
	  label_vars = b->u.label;
	  --label_vars->label_bindings.stmt_exprs;
	  /* A negative count means the spot binding was created inside
	     the statement expression now ending: record that it left a
	     statement expression and clamp the counter back to zero.  */
	  if (label_vars->label_bindings.stmt_exprs < 0)
	    {
	      label_vars->label_bindings.left_stmt_expr = true;
	      label_vars->label_bindings.stmt_exprs = 0;
	    }
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    {
	      --g->goto_bindings.stmt_exprs;
	      if (g->goto_bindings.stmt_exprs < 0)
		{
		  g->goto_bindings.left_stmt_expr = true;
		  g->goto_bindings.stmt_exprs = 0;
		}
	    }
	}
    }

  if (switch_bindings != NULL)
    {
      --switch_bindings->stmt_exprs;
      gcc_assert (switch_bindings->stmt_exprs >= 0);
    }
}
/* Push a definition or a declaration of struct, union or enum tag "name".
   "type" should be the type node.
   We assume that the tag "name" is not already defined, and has a location
   of LOC.

   Note that the definition may really be just a forward reference.
   In that case, the TYPE_SIZE will be zero.  */

static void
pushtag (location_t loc, tree name, tree type)
{
  /* Record the identifier as the type's name if it has none.  */
  if (name && !TYPE_NAME (type))
    TYPE_NAME (type) = name;
  bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc);

  /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
     tagged type we just added to the current scope.  This fake
     NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
     to output a representation of a tagged type, and it also gives
     us a convenient place to record the "scope start" address for the
     tagged type.  */
  TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
						TYPE_DECL, NULL_TREE, type));

  /* An approximation for now, so we can tell this is a function-scope tag.
     This will be updated in pop_scope.  */
  TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));

  if (warn_cxx_compat && name != NULL_TREE)
    {
      struct c_binding *b = I_SYMBOL_BINDING (name);

      /* Warn if the tag name is also visible as an ordinary typedef
	 naming a different type -- invalid in C++.  */
      if (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == TYPE_DECL
	  && (B_IN_CURRENT_SCOPE (b)
	      || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	  && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
	      != TYPE_MAIN_VARIANT (type)))
	{
	  auto_diagnostic_group d;
	  if (warning_at (loc, OPT_Wc___compat,
			  ("using %qD as both a typedef and a tag is "
			   "invalid in C++"), b->decl)
	      && b->locus != UNKNOWN_LOCATION)
	    inform (b->locus, "originally defined here");
	}
    }
}
/* An exported interface to pushtag.  This is used by the gdb plugin's
   binding oracle to introduce a new tag binding.  */

void
c_pushtag (location_t loc, tree name, tree type)
{
  pushtag (loc, name, type);
}
/* An exported interface to bind a declaration.  LOC is the location
   to use.  DECL is the declaration to bind.  The decl's name is used
   to determine how it is bound.  If DECL is a VAR_DECL, then
   IS_GLOBAL determines whether the decl is put into the global (file
   and external) scope or the current function's scope; if DECL is not
   a VAR_DECL then it is always put into the file scope.  */

void
c_bind (location_t loc, tree decl, bool is_global)
{
  struct c_scope *scope;
  bool nested = false;

  if (!VAR_P (decl) || current_function_scope == NULL)
    {
      /* Types and functions are always considered to be global.  */
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else if (is_global)
    {
      /* Also bind it into the external scope.  */
      bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
      nested = true;
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else
    {
      /* A function-local variable: bind it in the current function's
	 scope with internal linkage.  */
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_PUBLIC (decl) = 0;
      scope = current_function_scope;
    }

  bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}
/* Stores the first FILE*, const struct tm* etc. argument type (whatever
   it is) seen in a declaration of a file I/O etc. built-in, corresponding
   to the builtin_structptr_types array.  Subsequent declarations of such
   built-ins are expected to refer to it rather than to fileptr_type_node,
   etc. which is just void* (or to any other type).
   Used only by match_builtin_function_types.  */

static const unsigned builtin_structptr_type_count
  = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0];

static GTY(()) tree last_structptr_types[builtin_structptr_type_count];
/* Returns true if types T1 and T2 representing return types or types
   of function arguments are close enough to be considered interchangeable
   in redeclarations of built-in functions.  */

static bool
types_close_enough_to_match (tree t1, tree t2)
{
  /* Same machine mode, same pointer-ness, and same
     function-pointer-ness are all required.  */
  if (TYPE_MODE (t1) != TYPE_MODE (t2))
    return false;
  if (POINTER_TYPE_P (t1) != POINTER_TYPE_P (t2))
    return false;
  return FUNCTION_POINTER_TYPE_P (t1) == FUNCTION_POINTER_TYPE_P (t2);
}
/* Subroutine of compare_decls.  Allow harmless mismatches in return
   and argument types provided that the type modes match.  Set *STRICT
   and *ARGNO to the expected argument type and number in case of
   an argument type mismatch or null and zero otherwise.  Return
   a unified type given a suitable match, and 0 otherwise.  */

static tree
match_builtin_function_types (tree newtype, tree oldtype,
			      tree *strict, unsigned *argno)
{
  *argno = 0;
  *strict = NULL_TREE;

  /* Accept the return type of the new declaration if it has the same
     mode and if they're both pointers or if neither is.  */
  tree oldrettype = TREE_TYPE (oldtype);
  tree newrettype = TREE_TYPE (newtype);

  if (!types_close_enough_to_match (oldrettype, newrettype))
    return NULL_TREE;

  /* Check that the return types are compatible but don't fail if they
     are not (e.g., int vs long in ILP32) and just let the caller know.  */
  if (!comptypes (TYPE_MAIN_VARIANT (oldrettype),
		  TYPE_MAIN_VARIANT (newrettype)))
    *strict = oldrettype;

  tree oldargs = TYPE_ARG_TYPES (oldtype);
  tree newargs = TYPE_ARG_TYPES (newtype);
  tree tryargs = newargs;

  const unsigned nlst
    = sizeof last_structptr_types / sizeof last_structptr_types[0];
  const unsigned nbst
    = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0];

  /* The two arrays must stay in one-to-one correspondence.  */
  gcc_checking_assert (nlst == nbst);

  /* Walk both argument lists in parallel; I counts arguments from 1
     for diagnostics via *ARGNO.  */
  for (unsigned i = 1; oldargs || newargs; ++i)
    {
      if (!oldargs
	  || !newargs
	  || !TREE_VALUE (oldargs)
	  || !TREE_VALUE (newargs))
	return NULL_TREE;

      tree oldtype = TYPE_MAIN_VARIANT (TREE_VALUE (oldargs));
      tree newtype = TREE_VALUE (newargs);
      if (newtype == error_mark_node)
	return NULL_TREE;
      newtype = TYPE_MAIN_VARIANT (newtype);

      if (!types_close_enough_to_match (oldtype, newtype))
	return NULL_TREE;

      unsigned j = nbst;
      if (POINTER_TYPE_P (oldtype))
	/* Iterate over well-known struct types like FILE (whose types
	   aren't known to us) and compare the pointer to each to
	   the pointer argument.  */
	for (j = 0; j < nbst; ++j)
	  {
	    if (TREE_VALUE (oldargs) != builtin_structptr_types[j].node)
	      continue;
	    /* Store the first FILE* etc. argument type (whatever it is), and
	       expect any subsequent declarations of file I/O etc. built-ins
	       to refer to it rather than to fileptr_type_node etc. which is
	       just void* (or const void*).  */
	    if (last_structptr_types[j])
	      {
		if (!comptypes (last_structptr_types[j], newtype))
		  {
		    *argno = i;
		    *strict = last_structptr_types[j];
		  }
	      }
	    else
	      last_structptr_types[j] = newtype;
	    break;
	  }

      /* J == NBST means the argument was not one of the well-known
	 struct pointer types; fall back to a general comparison.  */
      if (j == nbst && !comptypes (oldtype, newtype))
	{
	  if (POINTER_TYPE_P (oldtype))
	    {
	      /* For incompatible pointers, only reject differences in
		 the unqualified variants of the referenced types but
		 consider differences in qualifiers as benign (report
		 those to caller via *STRICT below).  */
	      tree oldref = TYPE_MAIN_VARIANT (TREE_TYPE (oldtype));
	      tree newref = TYPE_MAIN_VARIANT (TREE_TYPE (newtype));
	      if (!comptypes (oldref, newref))
		return NULL_TREE;
	    }

	  if (!*strict)
	    {
	      *argno = i;
	      *strict = oldtype;
	    }
	}

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
    }

  tree trytype = build_function_type (newrettype, tryargs);

  /* Allow declaration to change transaction_safe attribute.  */
  tree oldattrs = TYPE_ATTRIBUTES (oldtype);
  tree oldtsafe = lookup_attribute ("transaction_safe", oldattrs);
  tree newattrs = TYPE_ATTRIBUTES (newtype);
  tree newtsafe = lookup_attribute ("transaction_safe", newattrs);
  if (oldtsafe && !newtsafe)
    oldattrs = remove_attribute ("transaction_safe", oldattrs);
  else if (newtsafe && !oldtsafe)
    oldattrs = tree_cons (get_identifier ("transaction_safe"),
			  NULL_TREE, oldattrs);

  return build_type_attribute_variant (trytype, oldattrs);
}
/* Subroutine of diagnose_mismatched_decls.  Check for function type
   mismatch involving an empty arglist vs a nonempty one and give clearer
   diagnostics.  */
static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
			   tree newtype, tree oldtype)
{
  tree t;

  /* Only applies when one side is an unprototyped declaration (not a
     definition) of a function whose return type agrees.  */
  if (TREE_CODE (olddecl) != FUNCTION_DECL
      || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype))
      || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE)
	   || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE)))
    return;

  t = TYPE_ARG_TYPES (oldtype);
  if (t == NULL_TREE)
    t = TYPE_ARG_TYPES (newtype);
  for (; t; t = TREE_CHAIN (t))
    {
      tree type = TREE_VALUE (t);

      /* A trailing non-void entry means the parameter list ends in an
	 ellipsis.  */
      if (TREE_CHAIN (t) == NULL_TREE
	  && TYPE_MAIN_VARIANT (type) != void_type_node)
	{
	  inform (input_location, "a parameter list with an ellipsis "
		  "cannot match an empty parameter name list declaration");
	  break;
	}

      if (c_type_promotes_to (type) != type)
	{
	  inform (input_location, "an argument type that has a default "
		  "promotion cannot match an empty parameter name list "
		  "declaration");
	  break;
	}
    }
}
/* Another subroutine of diagnose_mismatched_decls.  OLDDECL is an
   old-style function definition, NEWDECL is a prototype declaration.
   Diagnose inconsistencies in the argument list.  Returns TRUE if
   the prototype is compatible, FALSE if not.  */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
  tree newargs, oldargs;
  int i;

  /* Both lists are terminated by a void_type_node sentinel.  */
#define END_OF_ARGLIST(t) ((t) == void_type_node)

  oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
  newargs = TYPE_ARG_TYPES (newtype);
  i = 1;

  for (;;)
    {
      tree oldargtype = TREE_VALUE (oldargs);
      tree newargtype = TREE_VALUE (newargs);

      if (oldargtype == error_mark_node || newargtype == error_mark_node)
	return false;

      /* Compare main variants, but preserve _Atomic qualification
	 since it is significant for compatibility.  */
      oldargtype = (TYPE_ATOMIC (oldargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (oldargtype));
      newargtype = (TYPE_ATOMIC (newargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (newargtype));

      if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
	break;

      /* Reaching the end of just one list means the two decls don't
	 agree on the number of arguments.  */
      if (END_OF_ARGLIST (oldargtype))
	{
	  error ("prototype for %q+D declares more arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}
      else if (END_OF_ARGLIST (newargtype))
	{
	  error ("prototype for %q+D declares fewer arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}

      /* Type for passing arg must be consistent with that declared
	 for the arg.  */
      else if (!comptypes (oldargtype, newargtype))
	{
	  error ("prototype for %q+D declares argument %d"
		 " with incompatible type",
		 newdecl, i);
	  return false;
	}

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
      i++;
    }

  /* If we get here, no errors were found, but do issue a warning
     for this poor-style construct.  */
  warning (0, "prototype for %q+D follows non-prototype definition",
	   newdecl);
  return true;
#undef END_OF_ARGLIST
}
/* Subroutine of diagnose_mismatched_decls.  Report the location of DECL,
   the first of a pair of mismatched declarations, via an inform note.
   (Historical note: an earlier interface took a diagnostic function DIAG;
   the current code always uses inform.)  Stays silent for a built-in
   the user has never declared, since it has no source location worth
   pointing at.  */
static void
locate_old_decl (tree decl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl)
      && !C_DECL_DECLARED_BUILTIN (decl))
    ;   /* Undeclared built-in: nothing useful to report.  */
  else if (DECL_INITIAL (decl))
    inform (input_location, "previous definition of %q+D was here", decl);
  else if (C_DECL_IMPLICIT (decl))
    inform (input_location, "previous implicit declaration of %q+D was here", decl);
  else
    inform (input_location, "previous declaration of %q+D was here", decl);
}
/* Subroutine of duplicate_decls.  Compare NEWDECL to OLDDECL.
   Returns true if the caller should proceed to merge the two, false
   if OLDDECL should simply be discarded.  As a side effect, issues
   all necessary diagnostics for invalid or poor-style combinations.
   If it returns true, writes the types of NEWDECL and OLDDECL to
   *NEWTYPEP and *OLDTYPEP - these may have been adjusted from
   TREE_TYPE (NEWDECL, OLDDECL) respectively.

   The checks are ordered: hard errors (different symbol kinds,
   enumerator redeclaration, incompatible types) return false early;
   later sections accumulate WARNED/PEDWARNED so that a single
   locate_old_decl note is emitted at the end for poor-style cases.  */
static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
			   tree *newtypep, tree *oldtypep)
{
  tree newtype, oldtype;
  bool retval = true;

#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL)  \
				  && DECL_EXTERNAL (DECL))

  /* If we have error_mark_node for either decl or type, just discard
     the previous decl - we're in an error cascade already.  */
  if (olddecl == error_mark_node || newdecl == error_mark_node)
    return false;
  *oldtypep = oldtype = TREE_TYPE (olddecl);
  *newtypep = newtype = TREE_TYPE (newdecl);
  if (oldtype == error_mark_node || newtype == error_mark_node)
    return false;

  /* Two different categories of symbol altogether.  This is an error
     unless OLDDECL is a builtin.  OLDDECL will be discarded in any case.  */
  if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
    {
      if (!(TREE_CODE (olddecl) == FUNCTION_DECL
	    && fndecl_built_in_p (olddecl)
	    && !C_DECL_DECLARED_BUILTIN (olddecl)))
	{
	  auto_diagnostic_group d;
	  error ("%q+D redeclared as different kind of symbol", newdecl);
	  locate_old_decl (olddecl);
	}
      else if (TREE_PUBLIC (newdecl))
	warning (OPT_Wbuiltin_declaration_mismatch,
		 "built-in function %q+D declared as non-function",
		 newdecl);
      else
	warning (OPT_Wshadow, "declaration of %q+D shadows "
		 "a built-in function", newdecl);
      return false;
    }

  /* Enumerators have no linkage, so may only be declared once in a
     given scope.  */
  if (TREE_CODE (olddecl) == CONST_DECL)
    {
      auto_diagnostic_group d;
      error ("redeclaration of enumerator %q+D", newdecl);
      locate_old_decl (olddecl);
      return false;
    }

  /* Track whether a diagnostic has fired so the final section can add
     a locate_old_decl note exactly once.  */
  bool pedwarned = false;
  bool warned = false;
  auto_diagnostic_group d;

  if (!comptypes (oldtype, newtype))
    {
      if (TREE_CODE (olddecl) == FUNCTION_DECL
	  && fndecl_built_in_p (olddecl, BUILT_IN_NORMAL)
	  && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  /* Accept "harmless" mismatches in function types such
	     as missing qualifiers or int vs long when they're the same
	     size.  However, diagnose return and argument types that are
	     incompatible according to language rules.  */
	  tree mismatch_expect;
	  unsigned mismatch_argno;

	  tree trytype = match_builtin_function_types (newtype, oldtype,
						       &mismatch_expect,
						       &mismatch_argno);

	  if (trytype && comptypes (newtype, trytype))
	    *oldtypep = oldtype = trytype;
	  else
	    {
	      /* If types don't match for a built-in, throw away the
		 built-in.  No point in calling locate_old_decl here, it
		 won't print anything.  */
	      const char *header = header_for_builtin_fn (olddecl);
	      location_t loc = DECL_SOURCE_LOCATION (newdecl);
	      if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
			      "conflicting types for built-in function %q+D; "
			      "expected %qT",
			      newdecl, oldtype)
		  && header)
		{
		  /* Suggest the right header to include as the preferred
		     solution rather than the spelling of the declaration.  */
		  rich_location richloc (line_table, loc);
		  maybe_add_include_fixit (&richloc, header, true);
		  inform (&richloc,
			  "%qD is declared in header %qs", olddecl, header);
		}
	      return false;
	    }

	  if (mismatch_expect && extra_warnings)
	    {
	      /* Even when the overall types are acceptable, warn (under
		 -Wextra) about an individual argument/return mismatch
		 that match_builtin_function_types flagged.  */
	      location_t newloc = DECL_SOURCE_LOCATION (newdecl);
	      bool warned = false;
	      if (mismatch_argno)
		warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch,
				     "mismatch in argument %u type of built-in "
				     "function %qD; expected %qT",
				     mismatch_argno, newdecl, mismatch_expect);
	      else
		warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch,
				     "mismatch in return type of built-in "
				     "function %qD; expected %qT",
				     newdecl, mismatch_expect);
	      const char *header = header_for_builtin_fn (olddecl);
	      if (warned && header)
		{
		  rich_location richloc (line_table, newloc);
		  maybe_add_include_fixit (&richloc, header, true);
		  inform (&richloc,
			  "%qD is declared in header %qs", olddecl, header);
		}
	    }
	}
      else if (TREE_CODE (olddecl) == FUNCTION_DECL
	       && DECL_IS_BUILTIN (olddecl))
	{
	  /* A conflicting function declaration for a predeclared
	     function that isn't actually built in.  Objective C uses
	     these.  The new declaration silently overrides everything
	     but the volatility (i.e. noreturn) indication.  See also
	     below.  FIXME: Make Objective C use normal builtins.  */
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  return false;
	}
      /* Permit void foo (...) to match int foo (...) if the latter is
	 the definition and implicit int was used.  See
	 c-torture/compile/920625-2.c.  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
	       && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
	  C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
	}
      /* Permit void foo (...) to match an earlier call to foo (...) with
	 no declared type (thus, implicitly int).  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
	       && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
	}
      else
	{
	  /* Genuinely incompatible types.  Distinguish address-space,
	     qualifier, and plain type conflicts for a sharper message.  */
	  int new_quals = TYPE_QUALS (newtype);
	  int old_quals = TYPE_QUALS (oldtype);

	  if (new_quals != old_quals)
	    {
	      addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
	      addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
	      if (new_addr != old_addr)
		{
		  if (ADDR_SPACE_GENERIC_P (new_addr))
		    error ("conflicting named address spaces (generic vs %s) "
			   "for %q+D",
			   c_addr_space_name (old_addr), newdecl);
		  else if (ADDR_SPACE_GENERIC_P (old_addr))
		    error ("conflicting named address spaces (%s vs generic) "
			   "for %q+D",
			   c_addr_space_name (new_addr), newdecl);
		  else
		    error ("conflicting named address spaces (%s vs %s) "
			   "for %q+D",
			   c_addr_space_name (new_addr),
			   c_addr_space_name (old_addr),
			   newdecl);
		}

	      if (CLEAR_QUAL_ADDR_SPACE (new_quals)
		  != CLEAR_QUAL_ADDR_SPACE (old_quals))
		error ("conflicting type qualifiers for %q+D", newdecl);
	    }
	  else
	    error ("conflicting types for %q+D", newdecl);
	  diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Redeclaration of a type is a constraint violation (6.7.2.3p1),
     but silently ignore the redeclaration if either is in a system
     header.  (Conflicting redeclarations were handled above.)  This
     is allowed for C11 if the types are the same, not just
     compatible.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      bool types_different = false;
      int comptypes_result;

      comptypes_result
	= comptypes_check_different_types (oldtype, newtype, &types_different);

      if (comptypes_result != 1 || types_different)
	{
	  error ("redefinition of typedef %q+D with different type", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      if (DECL_IN_SYSTEM_HEADER (newdecl)
	  || DECL_IN_SYSTEM_HEADER (olddecl)
	  || TREE_NO_WARNING (newdecl)
	  || TREE_NO_WARNING (olddecl))
	return true;  /* Allow OLDDECL to continue in use.  */

      if (variably_modified_type_p (newtype, NULL))
	{
	  error ("redefinition of typedef %q+D with variably modified type",
		 newdecl);
	  locate_old_decl (olddecl);
	}
      else if (pedwarn_c99 (input_location, OPT_Wpedantic,
			    "redefinition of typedef %q+D", newdecl))
	locate_old_decl (olddecl);

      return true;
    }

  /* Function declarations can either be 'static' or 'extern' (no
     qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
     can never conflict with each other on account of linkage
     (6.2.2p4).  Multiple definitions are not allowed (6.9p3,5) but
     gnu89 mode permits two definitions if one is 'extern inline' and
     one is not.  The non- extern-inline definition supersedes the
     extern-inline definition.  */
  else if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If you declare a built-in function name as static, or
	 define the built-in with an old-style definition (so we
	 can't validate the argument list) the built-in definition is
	 overridden, but optionally warn this was a bad choice of name.  */
      if (fndecl_built_in_p (olddecl)
	  && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  if (!TREE_PUBLIC (newdecl)
	      || (DECL_INITIAL (newdecl)
		  && !prototype_p (TREE_TYPE (newdecl))))
	    {
	      warning_at (DECL_SOURCE_LOCATION (newdecl),
			  OPT_Wshadow, "declaration of %qD shadows "
			  "a built-in function", newdecl);
	      /* Discard the old built-in function.  */
	      return false;
	    }

	  if (!prototype_p (TREE_TYPE (newdecl)))
	    {
	      /* Set for built-ins that take no arguments.  */
	      bool func_void_args = false;
	      if (tree at = TYPE_ARG_TYPES (oldtype))
		func_void_args = VOID_TYPE_P (TREE_VALUE (at));

	      if (extra_warnings && !func_void_args)
		warning_at (DECL_SOURCE_LOCATION (newdecl),
			    OPT_Wbuiltin_declaration_mismatch,
			    "declaration of built-in function %qD without "
			    "a prototype; expected %qT",
			    newdecl, TREE_TYPE (olddecl));
	    }
	}

      if (DECL_INITIAL (newdecl))
	{
	  if (DECL_INITIAL (olddecl))
	    {
	      /* If both decls are in the same TU and the new declaration
		 isn't overriding an extern inline reject the new decl.
		 In c99, no overriding is allowed in the same translation
		 unit.  */
	      if ((!DECL_EXTERN_INLINE (olddecl)
		   || DECL_EXTERN_INLINE (newdecl)
		   || (!flag_gnu89_inline
		       && (!DECL_DECLARED_INLINE_P (olddecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (olddecl)))
		       && (!DECL_DECLARED_INLINE_P (newdecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (newdecl))))
		   )
		  && same_translation_unit_p (newdecl, olddecl))
		{
		  auto_diagnostic_group d;
		  error ("redefinition of %q+D", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	    }
	}
      /* If we have a prototype after an old-style function definition,
	 the argument types must be checked specially.  */
      else if (DECL_INITIAL (olddecl)
	       && !prototype_p (oldtype) && prototype_p (newtype)
	       && TYPE_ACTUAL_ARG_TYPES (oldtype))
	{
	  auto_diagnostic_group d;
	  if (!validate_proto_after_old_defn (newdecl, newtype, oldtype))
	    {
	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* A non-static declaration (even an "extern") followed by a
	 static declaration is undefined behavior per C99 6.2.2p3-5,7.
	 The same is true for a static forward declaration at block
	 scope followed by a non-static declaration/definition at file
	 scope.  Static followed by non-static at the same scope is
	 not undefined behavior, and is the most convenient way to get
	 some effects (see e.g.  what unwind-dw2-fde-glibc.c does to
	 the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
	 we do diagnose it if -Wtraditional.  */
      if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
	{
	  /* Two exceptions to the rule.  If olddecl is an extern
	     inline, or a predeclared function that isn't actually
	     built in, newdecl silently overrides olddecl.  The latter
	     occur only in Objective C; see also above.  (FIXME: Make
	     Objective C use normal builtins.)  */
	  if (!DECL_IS_BUILTIN (olddecl)
	      && !DECL_EXTERN_INLINE (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("static declaration of %q+D follows "
		     "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  return false;
	}
      else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
	{
	  if (DECL_CONTEXT (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("non-static declaration of %q+D follows "
		     "static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	  else if (warn_traditional)
	    {
	      warned |= warning (OPT_Wtraditional,
				 "non-static declaration of %q+D "
				 "follows static declaration", newdecl);
	    }
	}

      /* Make sure gnu_inline attribute is either not present, or
	 present on all inline decls.  */
      if (DECL_DECLARED_INLINE_P (olddecl)
	  && DECL_DECLARED_INLINE_P (newdecl))
	{
	  bool newa = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (newdecl)) != NULL;
	  bool olda = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (olddecl)) != NULL;
	  if (newa != olda)
	    {
	      auto_diagnostic_group d;
	      error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
			newa ? newdecl : olddecl);
	      error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
			"but not here");
	    }
	}
    }
  else if (VAR_P (newdecl))
    {
      /* Only variables can be thread-local, and all declarations must
	 agree on this property.  */
      if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
	{
	  /* Nothing to check.  Since OLDDECL is marked threadprivate
	     and NEWDECL does not have a thread-local attribute, we
	     will merge the threadprivate attribute into NEWDECL.  */
	  ;
	}
      else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
	{
	  auto_diagnostic_group d;
	  if (DECL_THREAD_LOCAL_P (newdecl))
	    error ("thread-local declaration of %q+D follows "
		   "non-thread-local declaration", newdecl);
	  else
	    error ("non-thread-local declaration of %q+D follows "
		   "thread-local declaration", newdecl);

	  locate_old_decl (olddecl);
	  return false;
	}

      /* Multiple initialized definitions are not allowed (6.9p3,5).  */
      if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
	{
	  auto_diagnostic_group d;
	  error ("redefinition of %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      /* Objects declared at file scope: if the first declaration had
	 external linkage (even if it was an external reference) the
	 second must have external linkage as well, or the behavior is
	 undefined.  If the first declaration had internal linkage, then
	 the second must too, or else be an external reference (in which
	 case the composite declaration still has internal linkage).
	 As for function declarations, we warn about the static-then-
	 extern case only for -Wtraditional.  See generally 6.2.2p3-5,7.  */
      if (DECL_FILE_SCOPE_P (newdecl)
	  && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      if (!DECL_FILE_SCOPE_P (olddecl))
		{
		  auto_diagnostic_group d;
		  error ("extern declaration of %q+D follows "
			 "declaration with no linkage", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	      else if (warn_traditional)
		{
		  warned |= warning (OPT_Wtraditional,
				     "non-static declaration of %q+D "
				     "follows static declaration", newdecl);
		}
	    }
	  else
	    {
	      auto_diagnostic_group d;
	      if (TREE_PUBLIC (newdecl))
		error ("non-static declaration of %q+D follows "
		       "static declaration", newdecl);
	      else
		error ("static declaration of %q+D follows "
		       "non-static declaration", newdecl);

	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* Two objects with the same name declared at the same block
	 scope must both be external references (6.7p3).  */
      else if (!DECL_FILE_SCOPE_P (newdecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      /* Extern with initializer at block scope, which will
		 already have received an error.  */
	    }
	  else if (DECL_EXTERNAL (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("declaration of %q+D with no linkage follows "
		     "extern declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  else
	    {
	      auto_diagnostic_group d;
	      error ("redeclaration of %q+D with no linkage", newdecl);
	      locate_old_decl (olddecl);
	    }

	  return false;
	}

      /* C++ does not permit a decl to appear multiple times at file
	 scope.  */
      if (warn_cxx_compat
	  && DECL_FILE_SCOPE_P (newdecl)
	  && !DECL_EXTERNAL (newdecl)
	  && !DECL_EXTERNAL (olddecl))
	warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
			      OPT_Wc___compat,
			      ("duplicate declaration of %qD is "
			       "invalid in C++"),
			      newdecl);
    }

  /* warnings */
  /* All decls must agree on a visibility.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
      && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
      && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
    {
      warned |= warning (0, "redeclaration of %q+D with different visibility "
			 "(old visibility preserved)", newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    warned |= diagnose_mismatched_attributes (olddecl, newdecl);
  else /* PARM_DECL, VAR_DECL */
    {
      /* Redeclaration of a parameter is a constraint violation (this is
	 not explicitly stated, but follows from C99 6.7p3 [no more than
	 one declaration of the same identifier with no linkage in the
	 same scope, except type tags] and 6.2.2p6 [parameters have no
	 linkage]).  We must check for a forward parameter declaration,
	 indicated by TREE_ASM_WRITTEN on the old declaration - this is
	 an extension, the mandatory diagnostic for which is handled by
	 mark_forward_parm_decls.  */
      if (TREE_CODE (newdecl) == PARM_DECL
	  && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
	{
	  auto_diagnostic_group d;
	  error ("redefinition of parameter %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Optional warning for completely redundant decls.  */
  if (!warned && !pedwarned
      && warn_redundant_decls
      /* Don't warn about a function declaration followed by a
	 definition.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
      /* Don't warn about redundant redeclarations of builtins.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && !fndecl_built_in_p (newdecl)
	   && fndecl_built_in_p (olddecl)
	   && !C_DECL_DECLARED_BUILTIN (olddecl))
      /* Don't warn about an extern followed by a definition.  */
      && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
      /* Don't warn about forward parameter decls.  */
      && !(TREE_CODE (newdecl) == PARM_DECL
	   && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
      /* Don't warn about a variable definition following a declaration.  */
      && !(VAR_P (newdecl)
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
    {
      warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
			newdecl);
    }

  /* Report location of previous decl/defn.  */
  if (warned || pedwarned)
    locate_old_decl (olddecl);

#undef DECL_EXTERN_INLINE

  return retval;
}
/* Subroutine of duplicate_decls.  NEWDECL has been found to be
   consistent with OLDDECL, but carries new information.  Merge the
   new information into OLDDECL.  This function issues no
   diagnostics.

   Fixes relative to the previous revision:
   - the forward-parm rechaining loop iterated over
     "current_scope->bindings" through a corrupted "&current" token;
     restored the address-of expression.
   - the DECL_ORIGINAL_TYPE variant-list walk advanced with
     TYPE_MAIN_VARIANT (which maps a main variant to itself and so
     never makes progress); a variant chain is walked with
     TYPE_NEXT_VARIANT.  */
static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
  bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
			    && DECL_INITIAL (newdecl) != NULL_TREE);
  bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (newdecl)));
  bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (olddecl)));

  /* For real parm decl following a forward decl, rechain the old decl
     in its new location and clear TREE_ASM_WRITTEN (it's not a
     forward decl anymore).  */
  if (TREE_CODE (newdecl) == PARM_DECL
      && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
    {
      struct c_binding *b, **here;

      /* Find OLDDECL's binding in the current scope and move it to the
	 head of the binding list.  */
      for (here = &current_scope->bindings; *here; here = &(*here)->prev)
	if ((*here)->decl == olddecl)
	  goto found;
      gcc_unreachable ();

    found:
      b = *here;
      *here = b->prev;
      b->prev = current_scope->bindings;
      current_scope->bindings = b;

      TREE_ASM_WRITTEN (olddecl) = 0;
    }

  DECL_ATTRIBUTES (newdecl)
    = targetm.merge_decl_attributes (olddecl, newdecl);

  /* For typedefs use the old type, as the new type's DECL_NAME points
     at newdecl, which will be ggc_freed.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      /* But NEWTYPE might have an attribute, honor that.  */
      tree tem = newtype;
      newtype = oldtype;

      if (TYPE_USER_ALIGN (tem))
	{
	  if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
	    SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem));
	  TYPE_USER_ALIGN (newtype) = true;
	}

      /* And remove the new type from the variants list.  */
      if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl)
	{
	  tree remove = TREE_TYPE (newdecl);
	  if (TYPE_MAIN_VARIANT (remove) == remove)
	    {
	      gcc_assert (TYPE_NEXT_VARIANT (remove) == NULL_TREE);
	      /* If remove is the main variant, no need to remove that
		 from the list.  One of the DECL_ORIGINAL_TYPE
		 variants, e.g. created for aligned attribute, might still
		 refer to the newdecl TYPE_DECL though, so remove that one
		 in that case.  */
	      if (DECL_ORIGINAL_TYPE (newdecl)
		  && DECL_ORIGINAL_TYPE (newdecl) != remove)
		/* Walk the variant chain; advance with TYPE_NEXT_VARIANT
		   (TYPE_MAIN_VARIANT would never make progress).  */
		for (tree t = TYPE_MAIN_VARIANT (DECL_ORIGINAL_TYPE (newdecl));
		     t; t = TYPE_NEXT_VARIANT (t))
		  if (TYPE_NAME (TYPE_NEXT_VARIANT (t)) == newdecl)
		    {
		      TYPE_NEXT_VARIANT (t)
			= TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t));
		      break;
		    }
	    }
	  else
	    for (tree t = TYPE_MAIN_VARIANT (remove); ;
		 t = TYPE_NEXT_VARIANT (t))
	      if (TYPE_NEXT_VARIANT (t) == remove)
		{
		  TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove);
		  break;
		}
	}
    }

  /* Merge the data types specified in the two decls.  */
  TREE_TYPE (newdecl)
    = TREE_TYPE (olddecl)
    = composite_type (newtype, oldtype);

  /* Lay the type out, unless already done.  */
  if (!comptypes (oldtype, TREE_TYPE (newdecl)))
    {
      if (TREE_TYPE (newdecl) != error_mark_node)
	layout_type (TREE_TYPE (newdecl));
      if (TREE_CODE (newdecl) != FUNCTION_DECL
	  && TREE_CODE (newdecl) != TYPE_DECL
	  && TREE_CODE (newdecl) != CONST_DECL)
	layout_decl (newdecl, 0);
    }
  else
    {
      /* Since the type is OLDDECL's, make OLDDECL's size go with.  */
      DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
      DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
      SET_DECL_MODE (newdecl, DECL_MODE (olddecl));
      if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
	{
	  SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl));
	  DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
	}
      if (DECL_WARN_IF_NOT_ALIGN (olddecl)
	  > DECL_WARN_IF_NOT_ALIGN (newdecl))
	SET_DECL_WARN_IF_NOT_ALIGN (newdecl,
				    DECL_WARN_IF_NOT_ALIGN (olddecl));
    }

  /* Keep the old rtl since we can safely use it.  */
  if (HAS_RTL_P (olddecl))
    COPY_DECL_RTL (olddecl, newdecl);

  /* Merge the type qualifiers.  */
  if (TREE_READONLY (newdecl))
    TREE_READONLY (olddecl) = 1;

  if (TREE_THIS_VOLATILE (newdecl))
    TREE_THIS_VOLATILE (olddecl) = 1;

  /* Merge deprecatedness.  */
  if (TREE_DEPRECATED (newdecl))
    TREE_DEPRECATED (olddecl) = 1;

  /* If a decl is in a system header and the other isn't, keep the one on the
     system header.  Otherwise, keep source location of definition rather than
     declaration and of prototype rather than non-prototype unless that
     prototype is built-in.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
      && DECL_IN_SYSTEM_HEADER (olddecl)
      && !DECL_IN_SYSTEM_HEADER (newdecl) )
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
  else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
	   && DECL_IN_SYSTEM_HEADER (newdecl)
	   && !DECL_IN_SYSTEM_HEADER (olddecl))
    DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
  else if ((DECL_INITIAL (newdecl) == NULL_TREE
	    && DECL_INITIAL (olddecl) != NULL_TREE)
	   || (old_is_prototype && !new_is_prototype
	       && !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);

  /* Merge the initialization information.  */
  if (DECL_INITIAL (newdecl) == NULL_TREE)
    DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);

  /* Merge the threadprivate attribute.  */
  if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl))
    C_DECL_THREADPRIVATE_P (newdecl) = 1;

  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
    {
      /* Copy the assembler name.
	 Currently, it can only be defined in the prototype.  */
      COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);

      /* Use visibility of whichever declaration had it specified */
      if (DECL_VISIBILITY_SPECIFIED (olddecl))
	{
	  DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
	  DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
	}

      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
	  DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
	  DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
	  DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
	    |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
	  if (DECL_IS_OPERATOR_NEW_P (olddecl))
	    DECL_SET_IS_OPERATOR_NEW (newdecl, true);
	  if (DECL_IS_OPERATOR_DELETE_P (olddecl))
	    DECL_SET_IS_OPERATOR_DELETE (newdecl, true);
	  TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
	  DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
	  DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
	}

      /* Merge the storage class information.  */
      merge_weak (newdecl, olddecl);

      /* For functions, static overrides non-static.  */
      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
	  /* This is since we don't automatically
	     copy the attributes of NEWDECL into OLDDECL.  */
	  TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
	  /* If this clears `static', clear it in the identifier too.  */
	  if (!TREE_PUBLIC (olddecl))
	    TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
	}
    }

  /* In c99, 'extern' declaration before (or after) 'inline' means this
     function is not DECL_EXTERNAL, unless 'gnu_inline' attribute
     is present.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL
      && !flag_gnu89_inline
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && (!DECL_DECLARED_INLINE_P (newdecl)
	  || !DECL_DECLARED_INLINE_P (olddecl)
	  || !DECL_EXTERNAL (olddecl))
      && DECL_EXTERNAL (newdecl)
      && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
      && !current_function_decl)
    DECL_EXTERNAL (newdecl) = 0;

  /* An inline definition following a static declaration is not
     DECL_EXTERNAL.  */
  if (new_is_definition
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && !TREE_PUBLIC (olddecl))
    DECL_EXTERNAL (newdecl) = 0;

  if (DECL_EXTERNAL (newdecl))
    {
      TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
      DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);

      /* An extern decl does not override previous storage class.  */
      TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
      if (!DECL_EXTERNAL (newdecl))
	{
	  DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
	  DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
	}
    }
  else
    {
      TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
      TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If we're redefining a function previously defined as extern
	 inline, make sure we emit debug info for the inline before we
	 throw it away, in case it was inlined into a function that
	 hasn't been written out yet.  */
      if (new_is_definition && DECL_INITIAL (olddecl))
	/* The new defn must not be inline.  */
	DECL_UNINLINABLE (newdecl) = 1;
      else
	{
	  /* If either decl says `inline', this fn is inline, unless
	     its definition was passed already.  */
	  if (DECL_DECLARED_INLINE_P (newdecl)
	      || DECL_DECLARED_INLINE_P (olddecl))
	    DECL_DECLARED_INLINE_P (newdecl) = 1;

	  DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
	    = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));

	  DECL_DISREGARD_INLINE_LIMITS (newdecl)
	    = DECL_DISREGARD_INLINE_LIMITS (olddecl)
	    = (DECL_DISREGARD_INLINE_LIMITS (newdecl)
	       || DECL_DISREGARD_INLINE_LIMITS (olddecl));
	}

      if (fndecl_built_in_p (olddecl))
	{
	  /* If redeclaring a builtin function, it stays built in.
	     But it gets tagged as having been declared.  */
	  copy_decl_built_in_function (newdecl, olddecl);
	  C_DECL_DECLARED_BUILTIN (newdecl) = 1;
	  if (new_is_prototype)
	    {
	      C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
	      if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
		{
		  enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
		  switch (fncode)
		    {
		      /* If a compatible prototype of these builtin functions
			 is seen, assume the runtime implements it with the
			 expected semantics.  */
		    case BUILT_IN_STPCPY:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_implicit_p (fncode, true);
		      break;
		    default:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_declared_p (fncode, true);
		      break;
		    }

		  copy_attributes_to_builtin (newdecl);
		}
	    }
	  else
	    C_DECL_BUILTIN_PROTOTYPE (newdecl)
	      = C_DECL_BUILTIN_PROTOTYPE (olddecl);
	}

      /* Preserve function specific target and optimization options */
      if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
	DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
	  = DECL_FUNCTION_SPECIFIC_TARGET (olddecl);

      if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
	  = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);

      /* Also preserve various other info from the definition.  */
      if (!new_is_definition)
	{
	  tree t;
	  DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
	  DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
	  DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
	  DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
	  DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
	  for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
	    DECL_CONTEXT (t) = newdecl;

	  /* See if we've got a function to instantiate from.  */
	  if (DECL_SAVED_TREE (olddecl))
	    DECL_ABSTRACT_ORIGIN (newdecl)
	      = DECL_ABSTRACT_ORIGIN (olddecl);
	}
    }

  /* Merge the USED information.  */
  if (TREE_USED (olddecl))
    TREE_USED (newdecl) = 1;
  else if (TREE_USED (newdecl))
    TREE_USED (olddecl) = 1;
  if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL)
    DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
  if (DECL_PRESERVE_P (olddecl))
    DECL_PRESERVE_P (newdecl) = 1;
  else if (DECL_PRESERVE_P (newdecl))
    DECL_PRESERVE_P (olddecl) = 1;

  /* Merge DECL_COMMON */
  if (VAR_P (olddecl) && VAR_P (newdecl)
      && !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl))
      && !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl)))
    DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl);

  /* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
     But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
     DECL_ARGUMENTS (if appropriate).  */
  {
    unsigned olddecl_uid = DECL_UID (olddecl);
    tree olddecl_context = DECL_CONTEXT (olddecl);
    tree olddecl_arguments = NULL;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      olddecl_arguments = DECL_ARGUMENTS (olddecl);

    memcpy ((char *) olddecl + sizeof (struct tree_common),
	    (char *) newdecl + sizeof (struct tree_common),
	    sizeof (struct tree_decl_common) - sizeof (struct tree_common));
    DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
    switch (TREE_CODE (olddecl))
      {
      case FUNCTION_DECL:
      case VAR_DECL:
	{
	  /* Preserve OLDDECL's symtab node across the raw copy.  */
	  struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;

	  memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		  (char *) newdecl + sizeof (struct tree_decl_common),
		  tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
	  olddecl->decl_with_vis.symtab_node = snode;

	  if ((DECL_EXTERNAL (olddecl)
	       || TREE_PUBLIC (olddecl)
	       || TREE_STATIC (olddecl))
	      && DECL_SECTION_NAME (newdecl) != NULL)
	    set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));

	  /* This isn't quite correct for something like
		int __thread x attribute ((tls_model ("local-exec")));
		extern int __thread x;
	     as we'll lose the "local-exec" model.  */
	  if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl))
	    set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
	  break;
	}

      case FIELD_DECL:
      case PARM_DECL:
      case LABEL_DECL:
      case RESULT_DECL:
      case CONST_DECL:
      case TYPE_DECL:
	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
	break;

      default:
	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
      }
    DECL_UID (olddecl) = olddecl_uid;
    DECL_CONTEXT (olddecl) = olddecl_context;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      DECL_ARGUMENTS (olddecl) = olddecl_arguments;
  }

  /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
     so that encode_section_info has a chance to look at the new decl
     flags and attributes.  */
  if (DECL_RTL_SET_P (olddecl)
      && (TREE_CODE (olddecl) == FUNCTION_DECL
	  || (VAR_P (olddecl) && TREE_STATIC (olddecl))))
    make_decl_rtl (olddecl);
}
/* Handle when a new declaration NEWDECL has the same name as an old
   one OLDDECL in the same binding contour.  Prints an error message
   if appropriate.

   If safely possible, alter OLDDECL to look like NEWDECL, and return
   true.  Otherwise, return false.  */

static bool
duplicate_decls (tree newdecl, tree olddecl)
{
  tree merged_new_type = NULL;
  tree merged_old_type = NULL;

  /* diagnose_mismatched_decls reports any conflicts and tells us
     whether the two declarations can be merged at all.  */
  bool mergeable = diagnose_mismatched_decls (newdecl, olddecl,
					      &merged_new_type,
					      &merged_old_type);
  if (!mergeable)
    {
      /* Avoid `unused variable' and other warnings for OLDDECL.  */
      TREE_NO_WARNING (olddecl) = 1;
      return false;
    }

  merge_decls (newdecl, olddecl, merged_new_type, merged_old_type);

  /* The NEWDECL will no longer be needed.

     Before releasing the node, be sure to remove function from symbol
     table that might have been inserted there to record comdat group.
     Be sure to however do not free DECL_STRUCT_FUNCTION because this
     structure is shared in between NEWDECL and OLDDECL.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    DECL_STRUCT_FUNCTION (newdecl) = NULL;

  if (VAR_OR_FUNCTION_DECL_P (newdecl))
    {
      struct symtab_node *snode = symtab_node::get (newdecl);
      if (snode != NULL)
	snode->remove ();
    }

  ggc_free (newdecl);
  return true;
}
/* Check whether decl-node NEW_DECL shadows an existing declaration. */
static void
warn_if_shadowing (tree new_decl)
{
struct c_binding *b;
/* Shadow warnings wanted? */
if (!(warn_shadow
|| warn_shadow_local
|| warn_shadow_compatible_local)
/* No shadow warnings for internally generated vars. */
|| DECL_IS_BUILTIN (new_decl))
return;
/* Is anything being shadowed? Invisible decls do not count. */
for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
if (b->decl && b->decl != new_decl && !b->invisible
&& (b->decl == error_mark_node
|| diagnostic_report_warnings_p (global_dc,
DECL_SOURCE_LOCATION (b->decl))))
{
tree old_decl = b->decl;
if (old_decl == error_mark_node)
{
warning (OPT_Wshadow, "declaration of %q+D shadows previous "
"non-variable", new_decl);
break;
}
bool warned = false;
auto_diagnostic_group d;
if (TREE_CODE (old_decl) == PARM_DECL)
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a parameter",
new_decl);
}
else if (DECL_FILE_SCOPE_P (old_decl))
{
/* Do not warn if a variable shadows a function, unless
the variable is a function or a pointer-to-function. */
if (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) != FUNCTION_DECL
&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
continue;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
"declaration of %qD shadows a global "
"declaration",
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
&& fndecl_built_in_p (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
break;
}
else
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a previous local",
new_decl);
}
if (warned)
inform (DECL_SOURCE_LOCATION (old_decl),
"shadowed declaration is here");
break;
}
}
/* Record a decl-node X as belonging to the current lexical scope.
   Check for errors (such as an incompatible declaration for the same
   name already seen in the same scope).

   Returns either X or an old decl for the same name.
   If an old decl is returned, it may have been smashed
   to agree with what X says.  */

tree
pushdecl (tree x)
{
  tree name = DECL_NAME (x);
  struct c_scope *scope = current_scope;
  struct c_binding *b;
  /* Whether the binding we create below is a "nested" re-declaration
     of something already bound in an outer (external) scope.  */
  bool nested = false;
  location_t locus = DECL_SOURCE_LOCATION (x);

  /* Must set DECL_CONTEXT for everything not at file scope or
     DECL_FILE_SCOPE_P won't work.  Local externs don't count
     unless they have initializers (which generate code).  */
  if (current_function_decl
      && (!VAR_OR_FUNCTION_DECL_P (x)
	  || DECL_INITIAL (x) || !TREE_PUBLIC (x)))
    DECL_CONTEXT (x) = current_function_decl;

  /* Anonymous decls are just inserted in the scope.  */
  if (!name)
    {
      bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
	    locus);
      return x;
    }

  /* First, see if there is another declaration with the same name in
     the current scope.  If there is, duplicate_decls may do all the
     work for us.  If duplicate_decls returns false, that indicates
     two incompatible decls in the same scope; we are to silently
     replace the old one (duplicate_decls has issued all appropriate
     diagnostics).  In particular, we should not consider possible
     duplicates in the external scope, or shadowing.  */
  b = I_SYMBOL_BINDING (name);
  if (b && B_IN_SCOPE (b, scope))
    {
      /* B_USE is the binding we actually merge against; B_EXT walks
	 out to the external-scope binding when one exists.  */
      struct c_binding *b_ext, *b_use;
      tree type = TREE_TYPE (x);
      tree visdecl = b->decl;
      tree vistype = TREE_TYPE (visdecl);
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && COMPLETE_TYPE_P (TREE_TYPE (x)))
	b->inner_comp = false;
      b_use = b;
      b_ext = b;
      /* If this is an external linkage declaration, we should check
	 for compatibility with the type in the external scope before
	 setting the type at this scope based on the visible
	 information only.  */
      if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
	{
	  while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
	    b_ext = b_ext->shadowed;
	  if (b_ext)
	    {
	      b_use = b_ext;
	      /* Restore the full (saved) type for the merge; the
		 visible type may have been narrowed for this scope.  */
	      if (b_use->u.type)
		TREE_TYPE (b_use->decl) = b_use->u.type;
	    }
	}
      if (duplicate_decls (x, b_use->decl))
	{
	  if (b_use != b)
	    {
	      /* Save the updated type in the external scope and
		 restore the proper type for this scope.  */
	      tree thistype;
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b_use->decl);
	      b_use->u.type = TREE_TYPE (b_use->decl);
	      /* Built-ins carry attributes on their saved type that
		 must survive on the visible type too.  */
	      if (TREE_CODE (b_use->decl) == FUNCTION_DECL
		  && fndecl_built_in_p (b_use->decl))
		thistype
		  = build_type_attribute_variant (thistype,
						  TYPE_ATTRIBUTES
						  (b_use->u.type));
	      TREE_TYPE (b_use->decl) = thistype;
	    }
	  return b_use->decl;
	}
      else
	goto skip_external_and_shadow_checks;
    }

  /* All declarations with external linkage, and all external
     references, go in the external scope, no matter what scope is
     current.  However, the binding in that scope is ignored for
     purposes of normal name lookup.  A separate binding structure is
     created in the requested scope; this governs the normal
     visibility of the symbol.

     The binding in the externals scope is used exclusively for
     detecting duplicate declarations of the same object, no matter
     what scope they are in; this is what we do here.  (C99 6.2.7p2:
     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.)

     However, in Objective-C, we also want to detect declarations
     conflicting with those of the basic types.  */
  if ((DECL_EXTERNAL (x) || scope == file_scope)
      && (VAR_OR_FUNCTION_DECL_P (x) || c_dialect_objc ()))
    {
      tree type = TREE_TYPE (x);
      tree vistype = NULL_TREE;
      tree visdecl = NULL_TREE;
      bool type_saved = false;
      if (b && !B_IN_EXTERNAL_SCOPE (b)
	  && VAR_OR_FUNCTION_DECL_P (b->decl)
	  && DECL_FILE_SCOPE_P (b->decl))
	{
	  visdecl = b->decl;
	  vistype = TREE_TYPE (visdecl);
	}
      if (scope != file_scope
	  && !DECL_IN_SYSTEM_HEADER (x))
	warning_at (locus, OPT_Wnested_externs,
		    "nested extern declaration of %qD", x);
      /* Walk the binding chain out to the external scope, saving
	 types and flagging inner array completions on the way.  */
      while (b && !B_IN_EXTERNAL_SCOPE (b))
	{
	  /* If this decl might be modified, save its type.  This is
	     done here rather than when the decl is first bound
	     because the type may change after first binding, through
	     being completed or through attributes being added.  If we
	     encounter multiple such decls, only the first should have
	     its type saved; the others will already have had their
	     proper types saved and the types will not have changed as
	     their scopes will not have been re-entered.  */
	  if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
	    {
	      b->u.type = TREE_TYPE (b->decl);
	      type_saved = true;
	    }
	  if (B_IN_FILE_SCOPE (b)
	      && VAR_P (b->decl)
	      && TREE_STATIC (b->decl)
	      && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
	      && !TYPE_DOMAIN (TREE_TYPE (b->decl))
	      && TREE_CODE (type) == ARRAY_TYPE
	      && TYPE_DOMAIN (type)
	      && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	      && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	    {
	      /* Array type completed in inner scope, which should be
		 diagnosed if the completion does not have size 1 and
		 it does not get completed in the file scope.  */
	      b->inner_comp = true;
	    }
	  b = b->shadowed;
	}

      /* If a matching external declaration has been found, set its
	 type to the composite of all the types of that declaration.
	 After the consistency checks, it will be reset to the
	 composite of the visible types only.  */
      if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && b->u.type)
	TREE_TYPE (b->decl) = b->u.type;

      /* The point of the same_translation_unit_p check here is,
	 we want to detect a duplicate decl for a construct like
	 foo() { extern bar(); } ... static bar();  but not if
	 they are in different translation units.  In any case,
	 the static does not go in the externals scope.  */
      if (b
	  && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && duplicate_decls (x, b->decl))
	{
	  tree thistype;
	  if (vistype)
	    {
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b->decl);
	    }
	  else
	    thistype = type;
	  b->u.type = TREE_TYPE (b->decl);
	  if (TREE_CODE (b->decl) == FUNCTION_DECL
	      && fndecl_built_in_p (b->decl))
	    thistype
	      = build_type_attribute_variant (thistype,
					      TYPE_ATTRIBUTES (b->u.type));
	  TREE_TYPE (b->decl) = thistype;
	  bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
		locus);
	  return b->decl;
	}
      else if (TREE_PUBLIC (x))
	{
	  if (visdecl && !b && duplicate_decls (x, visdecl))
	    {
	      /* An external declaration at block scope referring to a
		 visible entity with internal linkage.  The composite
		 type will already be correct for this scope, so we
		 just need to fall through to make the declaration in
		 this scope.  */
	      nested = true;
	      x = visdecl;
	    }
	  else
	    {
	      bind (name, x, external_scope, /*invisible=*/true,
		    /*nested=*/false, locus);
	      nested = true;
	    }
	}
    }

  if (TREE_CODE (x) != PARM_DECL)
    warn_if_shadowing (x);

 skip_external_and_shadow_checks:
  if (TREE_CODE (x) == TYPE_DECL)
    {
      /* So this is a typedef, set its underlying type.  */
      set_underlying_type (x);

      /* If X is a typedef defined in the current function, record it
	 for the purpose of implementing the -Wunused-local-typedefs
	 warning.  */
      record_locally_defined_typedef (x);
    }

  bind (name, x, scope, /*invisible=*/false, nested, locus);

  /* If x's type is incomplete because it's based on a
     structure or union which has not yet been fully declared,
     attach it to that structure or union type, so we can go
     back and complete the variable declaration later, if the
     structure or union gets fully declared.

     If the input is erroneous, we can have error_mark in the type
     slot (e.g. "f(void a, ...)") - that doesn't count as an
     incomplete type.  */
  if (TREE_TYPE (x) != error_mark_node
      && !COMPLETE_TYPE_P (TREE_TYPE (x)))
    {
      tree element = TREE_TYPE (x);

      /* Strip array layers to find the element type that must be
	 completed, then look through type variants.  */
      while (TREE_CODE (element) == ARRAY_TYPE)
	element = TREE_TYPE (element);
      element = TYPE_MAIN_VARIANT (element);

      if ((RECORD_OR_UNION_TYPE_P (element)
	   || TREE_CODE (element) == ENUMERAL_TYPE)
	  && (TREE_CODE (x) != TYPE_DECL
	      || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
	  && !COMPLETE_TYPE_P (element))
	C_TYPE_INCOMPLETE_VARS (element)
	  = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
    }
  return x;
}
/* Issue a warning about implicit function declaration.  ID is the function
   identifier, OLDDECL is a declaration of the function in a different scope,
   or NULL_TREE.  */

static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
  if (!warn_implicit_function_declaration)
    return;

  bool warned;
  auto_diagnostic_group d;
  name_hint hint;
  /* Only look for a spelling suggestion when there is no prior
     declaration to point the user at.  */
  if (!olddecl)
    hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc);

  if (flag_isoc99)
    {
      /* C99 and later: implicit declarations are a constraint
	 violation, so use pedwarn rather than warning_at.  */
      if (const char *suggestion = hint.suggestion ())
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (suggestion);
	  warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
			    "implicit declaration of function %qE;"
			    " did you mean %qs?",
			    id, suggestion);
	}
      else
	warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
			  "implicit declaration of function %qE", id);
    }
  else if (const char *suggestion = hint.suggestion ())
    {
      gcc_rich_location richloc (loc);
      richloc.add_fixit_replace (suggestion);
      warned = warning_at
	(&richloc, OPT_Wimplicit_function_declaration,
	 G_("implicit declaration of function %qE; did you mean %qs?"),
	 id, suggestion);
    }
  else
    warned = warning_at (loc, OPT_Wimplicit_function_declaration,
			 G_("implicit declaration of function %qE"), id);

  if (warned)
    {
      /* Whether the olddecl is an undeclared builtin function.
	 locate_old_decl will not generate a diagnostic for those,
	 so in that case we want to look elsewhere.  */
      bool undeclared_builtin = (olddecl
				 && TREE_CODE (olddecl) == FUNCTION_DECL
				 && fndecl_built_in_p (olddecl)
				 && !C_DECL_DECLARED_BUILTIN (olddecl));
      if (undeclared_builtin)
	{
	  /* Suggest the standard header that declares the builtin,
	     with a fix-it to insert the #include.  */
	  const char *header = header_for_builtin_fn (olddecl);
	  if (header)
	    {
	      rich_location richloc (line_table, loc);
	      maybe_add_include_fixit (&richloc, header, true);
	      inform (&richloc,
		      "include %qs or provide a declaration of %qE",
		      header, id);
	    }
	}
      else if (olddecl)
	locate_old_decl (olddecl);
    }

  /* No diagnostic was printed; tell the hint it went unused.  */
  if (!warned)
    hint.suppress ();
}
/* Return the name of the header file that declares built-in function
   FNDECL, or null if either we don't know or don't expect to see an
   explicit declaration.  */

static const char *
header_for_builtin_fn (tree fndecl)
{
  /* Only normal (library) builtins map to a standard header.  */
  if (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL;

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    /* Math functions, including the _FloatN/_FloatNx variants where
       they exist.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_ATAN2):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CEIL):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
    CASE_FLT_FN (BUILT_IN_COPYSIGN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_ERFC):
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
    CASE_FLT_FN (BUILT_IN_FDIM):
    CASE_FLT_FN (BUILT_IN_FLOOR):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
    CASE_FLT_FN (BUILT_IN_FMA):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA):
    CASE_FLT_FN (BUILT_IN_FMAX):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX):
    CASE_FLT_FN (BUILT_IN_FMIN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_FREXP):
    CASE_FLT_FN (BUILT_IN_HYPOT):
    CASE_FLT_FN (BUILT_IN_ILOGB):
    CASE_FLT_FN (BUILT_IN_LDEXP):
    CASE_FLT_FN (BUILT_IN_LGAMMA):
    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOGB):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_MODF):
    CASE_FLT_FN (BUILT_IN_NAN):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_NEXTAFTER):
    CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
    CASE_FLT_FN (BUILT_IN_REMQUO):
    CASE_FLT_FN (BUILT_IN_RINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SCALBLN):
    CASE_FLT_FN (BUILT_IN_SCALBN):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_SINCOS):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TGAMMA):
    CASE_FLT_FN (BUILT_IN_TRUNC):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
    case BUILT_IN_ISINF:
    case BUILT_IN_ISNAN:
      return "<math.h>";
    /* Complex arithmetic.  */
    CASE_FLT_FN (BUILT_IN_CABS):
    CASE_FLT_FN (BUILT_IN_CACOS):
    CASE_FLT_FN (BUILT_IN_CACOSH):
    CASE_FLT_FN (BUILT_IN_CARG):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CCOS):
    CASE_FLT_FN (BUILT_IN_CCOSH):
    CASE_FLT_FN (BUILT_IN_CEXP):
    CASE_FLT_FN (BUILT_IN_CIMAG):
    CASE_FLT_FN (BUILT_IN_CLOG):
    CASE_FLT_FN (BUILT_IN_CONJ):
    CASE_FLT_FN (BUILT_IN_CPOW):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CREAL):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CSQRT):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
      return "<complex.h>";
    /* Memory and string functions.  */
    case BUILT_IN_MEMCHR:
    case BUILT_IN_MEMCMP:
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRCHR:
    case BUILT_IN_STRCMP:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRCSPN:
    case BUILT_IN_STRLEN:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_STRNCMP:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRPBRK:
    case BUILT_IN_STRRCHR:
    case BUILT_IN_STRSPN:
    case BUILT_IN_STRSTR:
      return "<string.h>";
    /* Formatted and character I/O.  */
    case BUILT_IN_FPRINTF:
    case BUILT_IN_PUTC:
    case BUILT_IN_FPUTC:
    case BUILT_IN_FPUTS:
    case BUILT_IN_FSCANF:
    case BUILT_IN_FWRITE:
    case BUILT_IN_PRINTF:
    case BUILT_IN_PUTCHAR:
    case BUILT_IN_PUTS:
    case BUILT_IN_SCANF:
    case BUILT_IN_SNPRINTF:
    case BUILT_IN_SPRINTF:
    case BUILT_IN_SSCANF:
    case BUILT_IN_VFPRINTF:
    case BUILT_IN_VFSCANF:
    case BUILT_IN_VPRINTF:
    case BUILT_IN_VSCANF:
    case BUILT_IN_VSNPRINTF:
    case BUILT_IN_VSPRINTF:
    case BUILT_IN_VSSCANF:
      return "<stdio.h>";
    /* Character classification and conversion.  */
    case BUILT_IN_ISALNUM:
    case BUILT_IN_ISALPHA:
    case BUILT_IN_ISBLANK:
    case BUILT_IN_ISCNTRL:
    case BUILT_IN_ISDIGIT:
    case BUILT_IN_ISGRAPH:
    case BUILT_IN_ISLOWER:
    case BUILT_IN_ISPRINT:
    case BUILT_IN_ISPUNCT:
    case BUILT_IN_ISSPACE:
    case BUILT_IN_ISUPPER:
    case BUILT_IN_ISXDIGIT:
    case BUILT_IN_TOLOWER:
    case BUILT_IN_TOUPPER:
      return "<ctype.h>";
    /* Wide-character classification and conversion.  */
    case BUILT_IN_ISWALNUM:
    case BUILT_IN_ISWALPHA:
    case BUILT_IN_ISWBLANK:
    case BUILT_IN_ISWCNTRL:
    case BUILT_IN_ISWDIGIT:
    case BUILT_IN_ISWGRAPH:
    case BUILT_IN_ISWLOWER:
    case BUILT_IN_ISWPRINT:
    case BUILT_IN_ISWPUNCT:
    case BUILT_IN_ISWSPACE:
    case BUILT_IN_ISWUPPER:
    case BUILT_IN_ISWXDIGIT:
    case BUILT_IN_TOWLOWER:
    case BUILT_IN_TOWUPPER:
      return "<wctype.h>";
    /* General utilities.  */
    case BUILT_IN_ABORT:
    case BUILT_IN_ABS:
    case BUILT_IN_CALLOC:
    case BUILT_IN_EXIT:
    case BUILT_IN_FREE:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_MALLOC:
    case BUILT_IN_REALLOC:
    case BUILT_IN__EXIT2:
    case BUILT_IN_ALIGNED_ALLOC:
      return "<stdlib.h>";
    case BUILT_IN_IMAXABS:
      return "<inttypes.h>";
    case BUILT_IN_STRFTIME:
      return "<time.h>";
    default:
      /* No standard header known for this builtin.  */
      return NULL;
    }
}
/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
   function of type int ().  */

tree
implicitly_declare (location_t loc, tree functionid)
{
  struct c_binding *b;
  tree decl = NULL_TREE;
  tree asmspec_tree;

  /* Look for an existing declaration of this name in the external
     scope; invisible bindings are searched via the shadow chain.  */
  for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
    {
      if (B_IN_SCOPE (b, external_scope))
	{
	  decl = b->decl;
	  break;
	}
    }

  if (decl)
    {
      if (TREE_CODE (decl) != FUNCTION_DECL)
	return decl;

      /* FIXME: Objective-C has weird not-really-builtin functions
	 which are supposed to be visible automatically.  They wind up
	 in the external scope because they're pushed before the file
	 scope gets created.  Catch this here and rebind them into the
	 file scope.  */
      if (!fndecl_built_in_p (decl) && DECL_IS_BUILTIN (decl))
	{
	  bind (functionid, decl, file_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
      else
	{
	  tree newtype = default_function_type;
	  /* Restore the saved (full) type if one was stashed on the
	     binding; see pushdecl for when u.type is set.  */
	  if (b->u.type)
	    TREE_TYPE (decl) = b->u.type;
	  /* Implicit declaration of a function already declared
	     (somehow) in a different scope, or as a built-in.
	     If this is the first time this has happened, warn;
	     then recycle the old declaration but with the new type.  */
	  if (!C_DECL_IMPLICIT (decl))
	    {
	      implicit_decl_warning (loc, functionid, decl);
	      C_DECL_IMPLICIT (decl) = 1;
	    }
	  if (fndecl_built_in_p (decl))
	    {
	      /* Keep the builtin's type attributes on the new type,
		 then check the implicit type against the builtin's.  */
	      newtype = build_type_attribute_variant (newtype,
						      TYPE_ATTRIBUTES
						      (TREE_TYPE (decl)));
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  bool warned = warning_at (loc,
					    OPT_Wbuiltin_declaration_mismatch,
					    "incompatible implicit "
					    "declaration of built-in "
					    "function %qD", decl);
		  /* See if we can hint which header to include.  */
		  const char *header = header_for_builtin_fn (decl);
		  if (header != NULL && warned)
		    {
		      rich_location richloc (line_table, loc);
		      maybe_add_include_fixit (&richloc, header, true);
		      inform (&richloc,
			      "include %qs or provide a declaration of %qD",
			      header, decl);
		    }
		  /* On mismatch, keep the builtin's own type.  */
		  newtype = TREE_TYPE (decl);
		}
	    }
	  else
	    {
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  auto_diagnostic_group d;
		  error_at (loc, "incompatible implicit declaration of "
			    "function %qD", decl);
		  locate_old_decl (decl);
		}
	    }
	  /* Save the previous type and install the implicit one, then
	     rebind the recycled decl in the current scope.  */
	  b->u.type = TREE_TYPE (decl);
	  TREE_TYPE (decl) = newtype;
	  bind (functionid, decl, current_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
    }

  /* Not seen before.  */
  decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  C_DECL_IMPLICIT (decl) = 1;
  implicit_decl_warning (loc, functionid, 0);
  asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
  if (asmspec_tree)
    set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));

  /* C89 says implicit declarations are in the innermost block.
     So we record the decl in the standard fashion.  */
  decl = pushdecl (decl);

  /* No need to call objc_check_decl here - it's a function type.  */
  rest_of_decl_compilation (decl, 0, 0);

  /* Write a record describing this implicit function declaration
     to the prototypes file (if requested).  */
  gen_aux_info_record (decl, 0, 1, 0);

  /* Possibly apply some default attributes to this implicit declaration.  */
  decl_attributes (&decl, NULL_TREE, 0);

  return decl;
}
/* Issue an error message for a reference to an undeclared variable
   ID, including a reference to a builtin outside of function-call
   context.  Establish a binding of the identifier to error_mark_node
   in an appropriate scope, which will suppress further errors for the
   same identifier.  The error message should be given location LOC.  */

void
undeclared_variable (location_t loc, tree id)
{
  /* Set once the "reported only once" note has been emitted; being a
     function-local static that is never reset here, the note appears
     at most once per compilation.  */
  static bool already = false;
  struct c_scope *scope;

  auto_diagnostic_group d;
  if (current_function_decl == NULL_TREE)
    {
      /* Reference outside any function: offer a spelling suggestion
	 with a fix-it replacement when one is found.  */
      name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
      if (const char *suggestion = guessed_id.suggestion ())
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (suggestion);
	  error_at (&richloc,
		    "%qE undeclared here (not in a function);"
		    " did you mean %qs?",
		    id, suggestion);
	}
      else
	error_at (loc, "%qE undeclared here (not in a function)", id);
      scope = current_scope;
    }
  else
    {
      /* Inside a function: let Objective-C try a private-ivar
	 diagnosis first; otherwise emit the standard error.  */
      if (!objc_diagnose_private_ivar (id))
	{
	  name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
	  if (const char *suggestion = guessed_id.suggestion ())
	    {
	      gcc_rich_location richloc (loc);
	      richloc.add_fixit_replace (suggestion);
	      error_at (&richloc,
			"%qE undeclared (first use in this function);"
			" did you mean %qs?",
			id, suggestion);
	    }
	  else
	    error_at (loc, "%qE undeclared (first use in this function)", id);
	}
      if (!already)
	{
	  inform (loc, "each undeclared identifier is reported only"
		  " once for each function it appears in");
	  already = true;
	}

      /* If we are parsing old-style parameter decls, current_function_decl
	 will be nonnull but current_function_scope will be null.  */
      scope = current_function_scope ? current_function_scope : current_scope;
    }

  /* Bind the name to error_mark_node so later uses stay silent.  */
  bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);
}
/* Subroutine of lookup_label, declare_label, define_label: construct a
   LABEL_DECL with all the proper frills.  Also create a struct
   c_label_vars initialized for the current scope.  */

static tree
make_label (location_t location, tree name, bool defining,
	    struct c_label_vars **p_label_vars)
{
  /* Build the LABEL_DECL itself; labels have void type and mode.  */
  tree decl = build_decl (location, LABEL_DECL, name, void_type_node);
  SET_DECL_MODE (decl, VOIDmode);
  DECL_CONTEXT (decl) = current_function_decl;

  /* Allocate and initialize the per-label bookkeeping record.  */
  c_label_vars *vars = ggc_alloc<c_label_vars> ();
  vars->shadowed = NULL;
  vars->gotos = NULL;
  vars->decls_in_scope = make_tree_vector ();
  set_spot_bindings (&vars->label_bindings, defining);

  *p_label_vars = vars;
  return decl;
}
/* Get the LABEL_DECL corresponding to identifier NAME as a label.
   Create one if none exists so far for the current function.
   This is called when a label is used in a goto expression or
   has its address taken.  */

tree
lookup_label (tree name)
{
  if (!current_function_scope)
    {
      error ("label %qE referenced outside of any function", name);
      return NULL_TREE;
    }

  /* Use a label already defined or ref'd with this name, but not if
     it is inherited from a containing function and wasn't declared
     using __label__.  */
  tree existing = I_LABEL_DECL (name);
  if (existing
      && (DECL_CONTEXT (existing) == current_function_decl
	  || C_DECLARED_LABEL_FLAG (existing)))
    {
      /* If the label has only been declared, update its apparent
	 location to point here, for better diagnostics if it
	 turns out not to have been defined.  */
      if (DECL_INITIAL (existing) == NULL_TREE)
	DECL_SOURCE_LOCATION (existing) = input_location;
      return existing;
    }

  /* No usable label binding; create a fresh one and bind it in the
     function scope, where ordinary labels live.  */
  struct c_label_vars *vars;
  tree fresh = make_label (input_location, name, false, &vars);
  bind_label (name, fresh, current_function_scope, vars);
  return fresh;
}
/* Issue a warning about DECL for a goto statement at GOTO_LOC going
   to LABEL.  */

static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
  /* Jumping past a variably modified declaration is a hard error;
     skipping an ordinary initialization is only a warning.  */
  bool variably_modified
    = variably_modified_type_p (TREE_TYPE (decl), NULL_TREE);

  if (variably_modified)
    error_at (goto_loc,
	      "jump into scope of identifier with variably modified type");
  else
    warning_at (goto_loc, OPT_Wjump_misses_init,
		"jump skips variable initialization");

  /* Point at both the target label and the skipped declaration.  */
  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
  inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}
/* Look up a label because of a goto statement.  This is like
   lookup_label, but also issues any appropriate warnings.  */

tree
lookup_label_for_goto (location_t loc, tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  unsigned int ix;
  tree decl;

  label = lookup_label (name);
  if (label == NULL_TREE)
    return NULL_TREE;

  /* If we are jumping to a different function, we can't issue any
     useful warnings.  */
  if (DECL_CONTEXT (label) != current_function_decl)
    {
      /* Only a __label__-declared label may belong to an enclosing
	 function here; lookup_label enforces this.  */
      gcc_assert (C_DECLARED_LABEL_FLAG (label));
      return label;
    }

  label_vars = I_LABEL_BINDING (name)->u.label;

  /* If the label has not yet been defined, then push this goto on a
     list for possible later warnings.  */
  if (label_vars->label_bindings.scope == NULL)
    {
      c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();
      g->loc = loc;
      set_spot_bindings (&g->goto_bindings, true);
      vec_safe_push (label_vars->gotos, g);
      return label;
    }

  /* If there are any decls in label_vars->decls_in_scope, then this
     goto has missed the declaration of the decl.  This happens for a
     case like
       int i = 1;
      lab:
       ...
       goto lab;
     Issue a warning or error.  */
  FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
    warn_about_goto (loc, label, decl);

  /* A backward jump into a statement expression is always an error.  */
  if (label_vars->label_bindings.left_stmt_expr)
    {
      error_at (loc, "jump into statement expression");
      inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
    }

  return label;
}
/* Make a label named NAME in the current function, shadowing silently
   any that may be inherited from containing functions or containing
   scopes.  This is called for __label__ declarations.  */

tree
declare_label (tree name)
{
  struct c_binding *binding = I_LABEL_BINDING (name);

  /* Check to make sure that the label hasn't already been declared
     at this scope.  */
  if (binding && B_IN_CURRENT_SCOPE (binding))
    {
      auto_diagnostic_group d;
      error ("duplicate label declaration %qE", name);
      locate_old_decl (binding->decl);
      /* Just use the previous declaration.  */
      return binding->decl;
    }

  /* Create a fresh label, mark it as declared via __label__, and bind
     it in the current scope (not the function scope).  */
  struct c_label_vars *vars;
  tree decl = make_label (input_location, name, false, &vars);
  C_DECLARED_LABEL_FLAG (decl) = 1;
  bind_label (name, decl, current_scope, vars);
  return decl;
}
/* When we define a label, issue any appropriate warnings if there are
   any gotos earlier in the function which jump to this label.  */

static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
  unsigned int ix;
  struct c_goto_bindings *g;

  /* Each recorded goto was a forward jump to this (then-undefined)
     label; see lookup_label_for_goto, which pushes them.  */
  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
    {
      struct c_binding *b;
      struct c_scope *scope;

      /* We have a goto to this label.  The goto is going forward.  In
	 g->scope, the goto is going to skip any binding which was
	 defined after g->bindings_in_scope.  */
      if (g->goto_bindings.scope->has_jump_unsafe_decl)
	{
	  for (b = g->goto_bindings.scope->bindings;
	       b != g->goto_bindings.bindings_in_scope;
	       b = b->prev)
	    {
	      if (decl_jump_unsafe (b->decl))
		warn_about_goto (g->loc, label, b->decl);
	    }
	}

      /* We also need to warn about decls defined in any scopes
	 between the scope of the label and the scope of the goto.  */
      for (scope = label_vars->label_bindings.scope;
	   scope != g->goto_bindings.scope;
	   scope = scope->outer)
	{
	  /* The goto's scope must enclose the label's scope, so the
	     walk outward must terminate before running off the end.  */
	  gcc_assert (scope != NULL);
	  if (scope->has_jump_unsafe_decl)
	    {
	      /* In the label's own scope, only bindings made after the
		 label count; in intermediate scopes, all bindings do.  */
	      if (scope == label_vars->label_bindings.scope)
		b = label_vars->label_bindings.bindings_in_scope;
	      else
		b = scope->bindings;
	      for (; b != NULL; b = b->prev)
		{
		  if (decl_jump_unsafe (b->decl))
		    warn_about_goto (g->loc, label, b->decl);
		}
	    }
	}

      /* A forward jump into a statement expression is an error.  */
      if (g->goto_bindings.stmt_exprs > 0)
	{
	  error_at (g->loc, "jump into statement expression");
	  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
		  label);
	}
    }

  /* Now that the label is defined, we will issue warnings about
     subsequent gotos to this label when we see them.  */
  vec_safe_truncate (label_vars->gotos, 0);
  label_vars->gotos = NULL;
}
/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label, if the definition is valid.
   Otherwise return NULL_TREE.  */

tree
define_label (location_t location, tree name)
{
  /* Find any preexisting label with this name.  It is an error
     if that label has already been defined in this function, or
     if there is a containing function with a declared label with
     the same name.  */
  tree label = I_LABEL_DECL (name);

  if (label
      && ((DECL_CONTEXT (label) == current_function_decl
	   && DECL_INITIAL (label) != NULL_TREE)
	  || (DECL_CONTEXT (label) != current_function_decl
	      && C_DECLARED_LABEL_FLAG (label))))
    {
      /* Duplicate definition: diagnose and bail out.  */
      auto_diagnostic_group d;
      error_at (location, "duplicate label %qD", label);
      locate_old_decl (label);
      return NULL_TREE;
    }
  else if (label && DECL_CONTEXT (label) == current_function_decl)
    {
      struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;

      /* The label has been used or declared already in this function,
	 but not defined.  Update its location to point to this
	 definition.  */
      DECL_SOURCE_LOCATION (label) = location;
      set_spot_bindings (&label_vars->label_bindings, true);

      /* Issue warnings as required about any goto statements from
	 earlier in the function.  */
      check_earlier_gotos (label, label_vars);
    }
  else
    {
      struct c_label_vars *label_vars;

      /* No label binding for that identifier; make one.  */
      label = make_label (location, name, true, &label_vars);

      /* Ordinary labels go in the current function scope.  */
      bind_label (name, label, current_function_scope, label_vars);
    }

  /* Traditional C used one namespace for labels and identifiers.  */
  if (!in_system_header_at (input_location) && lookup_name (name))
    warning_at (location, OPT_Wtraditional,
		"traditional C lacks a separate namespace "
		"for labels, identifier %qE conflicts", name);

  /* Mark label as having been defined.  */
  DECL_INITIAL (label) = error_mark_node;
  return label;
}
/* Record the bindings in effect at the start of a new switch
   statement.  The result is later consulted at each case or default
   label to warn about jumps into the scope of jump-unsafe decls, and
   is released with c_release_switch_bindings.  */
struct c_spot_bindings *
c_get_switch_bindings (void)
{
  struct c_spot_bindings *sb = XNEW (struct c_spot_bindings);
  set_spot_bindings (sb, true);
  return sb;
}
/* Release the spot bindings recorded by c_get_switch_bindings once
   the corresponding switch statement is complete.  */
void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
  /* By this point any statement expression tracking must have
     balanced out; otherwise the parser mismanaged the bindings.  */
  gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
  XDELETE (bindings);
}
/* This is called at the point of a case or default label to issue
   warnings about decls as needed.  It returns true if it found an
   error, not just a warning.  */
bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
                              location_t switch_loc, location_t case_loc)
{
  bool saw_error;
  struct c_scope *scope;
  saw_error = false;
  /* Walk every scope opened between the switch statement itself and
     this label; a jump to the label enters all of them.  */
  for (scope = current_scope;
       scope != switch_bindings->scope;
       scope = scope->outer)
    {
      struct c_binding *b;
      gcc_assert (scope != NULL);
      /* Cheap flag check lets us skip scopes with nothing to warn
         about.  */
      if (!scope->has_jump_unsafe_decl)
        continue;
      for (b = scope->bindings; b != NULL; b = b->prev)
        {
          if (decl_jump_unsafe (b->decl))
            {
              /* Jumping past a variably modified type is a hard
                 error; jumping past an initialization is only a
                 -Wjump-misses-init warning.  */
              if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
                {
                  saw_error = true;
                  error_at (case_loc,
                            ("switch jumps into scope of identifier with "
                             "variably modified type"));
                }
              else
                warning_at (case_loc, OPT_Wjump_misses_init,
                            "switch jumps over variable initialization");
              inform (switch_loc, "switch starts here");
              inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
                      b->decl);
            }
        }
    }
  /* A positive stmt_exprs count means the label sits inside a
     statement expression the switch is not in: always an error.  */
  if (switch_bindings->stmt_exprs > 0)
    {
      saw_error = true;
      error_at (case_loc, "switch jumps into statement expression");
      inform (switch_loc, "switch starts here");
    }
  return saw_error;
}
/* Given NAME, an IDENTIFIER_NODE,
   return the structure (or union or enum) definition for that name.
   If THISLEVEL_ONLY is nonzero, searches only the current_scope.
   CODE says which kind of type the caller wants;
   it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
   If PLOC is not NULL and this returns non-null, it sets *PLOC to the
   location where the tag was defined.
   If the wrong kind of type is found, an error is reported.  */
static tree
lookup_tag (enum tree_code code, tree name, bool thislevel_only,
            location_t *ploc)
{
  struct c_binding *b = I_TAG_BINDING (name);
  bool thislevel = false;
  if (!b || !b->decl)
    return NULL_TREE;
  /* We only care about whether it's in this level if
     thislevel_only was set or it might be a type clash.  */
  if (thislevel_only || TREE_CODE (b->decl) != code)
    {
      /* For our purposes, a tag in the external scope is the same as
         a tag in the file scope.  (Primarily relevant to Objective-C
         and its builtin structure tags, which get pushed before the
         file scope is created.)  */
      if (B_IN_CURRENT_SCOPE (b)
          || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
        thislevel = true;
    }
  if (thislevel_only && !thislevel)
    return NULL_TREE;
  if (TREE_CODE (b->decl) != code)
    {
      /* Definition isn't the kind we were looking for.  Defer the
         diagnostic: the reference may legitimately shadow a tag from
         an outer scope, in which case no error is wanted.  */
      pending_invalid_xref = name;
      pending_invalid_xref_location = input_location;
      /* If in the same binding level as a declaration as a tag
         of a different type, this must not be allowed to
         shadow that tag, so give the error immediately.
         (For example, "struct foo; union foo;" is invalid.)  */
      if (thislevel)
        pending_xref_error ();
    }
  if (ploc != NULL)
    *ploc = b->locus;
  return b->decl;
}
/* Return true if a tag definition exists for NAME whose tree code is
   CODE (RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE).  */
bool
tag_exists_p (enum tree_code code, tree name)
{
  struct c_binding *binding = I_TAG_BINDING (name);
  return (binding != NULL
          && binding->decl != NULL_TREE
          && TREE_CODE (binding->decl) == code);
}
/* Emit the deferred diagnostic for a recent invalid struct, union or
   enum cross reference, if one is pending.  The diagnostic is
   deferred because such a reference is not invalid when used in the
   `struct foo;' construct for shadowing.  */
void
pending_xref_error (void)
{
  if (pending_invalid_xref == NULL_TREE)
    return;
  error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
            pending_invalid_xref);
  pending_invalid_xref = NULL_TREE;
}
/* Look up NAME in the current scope and its superiors
   in the namespace of variables, functions and typedefs.
   Return a ..._DECL node of some kind representing its definition,
   or return NULL_TREE if it is undefined.  */
tree
lookup_name (tree name)
{
  struct c_binding *binding = I_SYMBOL_BINDING (name);
  if (binding == NULL || binding->invisible)
    return NULL_TREE;
  /* Note typedef uses for -Wunused-local-typedefs bookkeeping.  */
  maybe_record_typedef_use (binding->decl);
  return binding->decl;
}
/* Similar to `lookup_name' but consider only bindings made directly
   in SCOPE, walking the chain of shadowed bindings for NAME.  */
static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
  for (struct c_binding *binding = I_SYMBOL_BINDING (name);
       binding != NULL;
       binding = binding->shadowed)
    {
      if (B_IN_SCOPE (binding, scope))
        return binding->decl;
    }
  return NULL_TREE;
}
/* Look for the closest match for NAME within the currently valid
   scopes.
   This finds the identifier with the lowest Levenshtein distance to
   NAME.  If there are multiple candidates with equal minimal distance,
   the first one found is returned.  Scopes are searched from innermost
   outwards, and within a scope in reverse order of declaration, thus
   benefiting candidates "near" to the current scope.
   The function also looks for similar macro names to NAME, since a
   misspelled macro name will not be expanded, and hence looks like an
   identifier to the C frontend.
   It also looks for start_typename keywords, to detect "singed" vs "signed"
   typos.
   Use LOC for any deferred diagnostics.  */
name_hint
lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc)
{
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
  /* First, try some well-known names in the C standard library, in case
     the user forgot a #include.  */
  const char *header_hint
    = get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name));
  if (header_hint)
    return name_hint (NULL,
                      new suggest_missing_header (loc,
                                                  IDENTIFIER_POINTER (name),
                                                  header_hint));
  /* Only suggest names reserved for the implementation if NAME begins
     with an underscore.  */
  bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_');
  best_match<tree, tree> bm (name);
  /* Look within currently valid scopes.  */
  for (c_scope *scope = current_scope; scope; scope = scope->outer)
    for (c_binding *binding = scope->bindings; binding; binding = binding->prev)
      {
        if (!binding->id || binding->invisible)
          continue;
        if (binding->decl == error_mark_node)
          continue;
        /* Don't use bindings from implicitly declared functions,
           as they were likely misspellings themselves.  */
        if (TREE_CODE (binding->decl) == FUNCTION_DECL)
          if (C_DECL_IMPLICIT (binding->decl))
            continue;
        /* Don't suggest names that are reserved for use by the
           implementation, unless NAME began with an underscore.  */
        if (!consider_implementation_names)
          {
            const char *suggestion_str = IDENTIFIER_POINTER (binding->id);
            if (name_reserved_for_implementation_p (suggestion_str))
              continue;
          }
        /* Filter candidates by the kind of name the parser expected
           at the use site.  */
        switch (kind)
          {
          case FUZZY_LOOKUP_TYPENAME:
            if (TREE_CODE (binding->decl) != TYPE_DECL)
              continue;
            break;
          case FUZZY_LOOKUP_FUNCTION_NAME:
            if (TREE_CODE (binding->decl) != FUNCTION_DECL)
              {
                /* Allow function pointers.  */
                if ((VAR_P (binding->decl)
                     || TREE_CODE (binding->decl) == PARM_DECL)
                    && TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE
                    && (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl)))
                        == FUNCTION_TYPE))
                  break;
                continue;
              }
            break;
          default:
            break;
          }
        bm.consider (binding->id);
      }
  /* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO"
     as:
       x = SOME_OTHER_MACRO (y);
     then "SOME_OTHER_MACRO" will survive to the frontend and show up
     as a misspelled identifier.
     Use the best distance so far so that a candidate is only set if
     a macro is better than anything so far.  This allows early rejection
     (without calculating the edit distance) of macro names that must have
     distance >= bm.get_best_distance (), and means that we only get a
     non-NULL result for best_macro_match if it's better than any of
     the identifiers already checked, which avoids needless creation
     of identifiers for macro hashnodes.  */
  best_macro_match bmm (name, bm.get_best_distance (), parse_in);
  cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate ();
  /* If a macro is the closest so far to NAME, use it, creating an
     identifier tree node for it.  */
  if (best_macro)
    {
      const char *id = (const char *)best_macro->ident.str;
      tree macro_as_identifier
        = get_identifier_with_length (id, best_macro->ident.len);
      bm.set_best_so_far (macro_as_identifier,
                          bmm.get_best_distance (),
                          bmm.get_best_candidate_length ());
    }
  /* Try the "start_typename" keywords to detect
     "singed" vs "signed" typos.  */
  if (kind == FUZZY_LOOKUP_TYPENAME)
    {
      for (unsigned i = 0; i < num_c_common_reswords; i++)
        {
          const c_common_resword *resword = &c_common_reswords[i];
          if (!c_keyword_starts_typename (resword->rid))
            continue;
          tree resword_identifier = ridpointers [resword->rid];
          if (!resword_identifier)
            continue;
          gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE);
          bm.consider (resword_identifier);
        }
    }
  /* Return the overall winner, or an empty hint if nothing was close
     enough to be meaningful.  */
  tree best = bm.get_best_meaningful_candidate ();
  if (best)
    return name_hint (IDENTIFIER_POINTER (best), NULL);
  else
    return name_hint (NULL, NULL);
}
/* Table of supported standard (C2x) attributes.  Terminated by an
   all-NULL sentinel entry; registered in c_init_decl_processing.  */
const struct attribute_spec std_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "deprecated", 0, 1, false, false, false, false,
    handle_deprecated_attribute, NULL },
  { "fallthrough", 0, 0, false, false, false, false,
    handle_fallthrough_attribute, NULL },
  { "maybe_unused", 0, 0, false, false, false, false,
    handle_unused_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
/* Create the predefined scalar types of C,
   and some nodes representing standard constants (0, 1, (void *) 0).
   Initialize the global scope.
   Make definitions for built-in primitive functions.  */
void
c_init_decl_processing (void)
{
  /* Save and restore input_location around the builtin declarations
     below, which are deliberately attributed to BUILTINS_LOCATION.  */
  location_t save_loc = input_location;
  /* Initialize reserved words for parser.  */
  c_parse_init ();
  register_scoped_attributes (std_attribute_table, NULL);
  current_function_decl = NULL_TREE;
  gcc_obstack_init (&parser_obstack);
  /* Make the externals scope.  */
  push_scope ();
  external_scope = current_scope;
  /* Declarations from c_common_nodes_and_builtins must not be associated
     with this input file, lest we get differences between using and not
     using preprocessed headers.  */
  input_location = BUILTINS_LOCATION;
  c_common_nodes_and_builtins ();
  /* In C, comparisons and TRUTH_* expressions have type int.  */
  truthvalue_type_node = integer_type_node;
  truthvalue_true_node = integer_one_node;
  truthvalue_false_node = integer_zero_node;
  /* Even in C99, which has a real boolean type.  */
  pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
                        boolean_type_node));
  input_location = save_loc;
  /* Hook used by c-common to build __FUNCTION__ and friends.  */
  make_fname_decl = c_make_fname_decl;
  start_fname_decls ();
}
/* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to
   give the decl, NAME is the initialization string and TYPE_DEP
   indicates whether NAME depended on the type of the function.  As we
   don't yet implement delayed emission of static data, we mark the
   decl as emitted so it is not placed in the output.  Anything using
   it must therefore pull out the STRING_CST initializer directly.
   FIXME.  */
static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char *name = fname_as_string (type_dep);
  tree decl, type, init;
  size_t length = strlen (name);
  /* Type is `const char[length + 1]' -- the index type covers the
     NUL terminator because build_index_type takes the maximum index.  */
  type = build_array_type (char_type_node,
                           build_index_type (size_int (length)));
  type = c_build_qualified_type (type, TYPE_QUAL_CONST);
  decl = build_decl (loc, VAR_DECL, id, type);
  TREE_STATIC (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  /* fname_as_string allocated NAME; build_string copies it, so free
     the original.  */
  init = build_string (length + 1, name);
  free (CONST_CAST (char *, name));
  TREE_TYPE (init) = type;
  DECL_INITIAL (decl) = init;
  TREE_USED (decl) = 1;
  if (current_function_decl
      /* For invalid programs like this:
        void foo()
        const char* p = __FUNCTION__;
         the __FUNCTION__ is believed to appear in K&R style function
         parameter declarator.  In that case we still don't have
         function_scope.  */
      && current_function_scope)
    {
      DECL_CONTEXT (decl) = current_function_decl;
      bind (id, decl, current_function_scope,
            /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }
  finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);
  return decl;
}
/* Register builtin DECL in the external scope as an invisible
   binding (it only becomes visible if the user declares it).
   Returns DECL.  */
tree
c_builtin_function (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  /* Should never be called on a symbol with a preexisting meaning.  */
  gcc_assert (!I_SYMBOL_BINDING (id));
  bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
        UNKNOWN_LOCATION);
  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }
  return decl;
}
/* Like c_builtin_function, but bind DECL visibly in the external
   scope (if it exists), without asserting the name is unbound.
   Returns DECL.  */
tree
c_builtin_function_ext_scope (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  if (external_scope)
    bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
          UNKNOWN_LOCATION);
  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }
  return decl;
}
/* Implement LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL.  Unlike
   c_builtin_function, this pushes DECL through the normal pushdecl
   path, so it is merged with any existing declaration.  */
tree
c_simulate_builtin_function_decl (tree decl)
{
  tree type = TREE_TYPE (decl);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  return pushdecl (decl);
}
/* Warn about attributes in a context where they are unused
   (attribute-declarations, except for the "fallthrough" case, and
   attributes on statements).  Standard attributes (those with a NULL
   namespace) get a pedwarn, since ignoring them is a constraint
   violation; other attributes get an ordinary warning.  */
void
c_warn_unused_attributes (tree attrs)
{
  for (tree attr = attrs; attr != NULL_TREE; attr = TREE_CHAIN (attr))
    {
      if (get_attribute_namespace (attr) == NULL_TREE)
        {
          /* The specifications of standard attributes mean this is a
             constraint violation.  */
          pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored",
                   get_attribute_name (attr));
        }
      else
        warning (OPT_Wattributes, "%qE attribute ignored",
                 get_attribute_name (attr));
    }
}
/* Warn for standard attributes being applied to a type that is not
   being defined, where that is a constraint violation, and return a
   list of attributes with them removed.  */
tree
c_warn_type_attributes (tree attrs)
{
  /* Walk with a pointer-to-link so offending entries can be spliced
     out of the list in place.  */
  tree *link = &attrs;
  while (*link != NULL_TREE)
    {
      if (get_attribute_namespace (*link) != NULL_TREE)
        {
          /* Non-standard attribute: keep it and advance.  */
          link = &TREE_CHAIN (*link);
          continue;
        }
      pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored",
               get_attribute_name (*link));
      *link = TREE_CHAIN (*link);
    }
  return attrs;
}
/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.  */
void
shadow_tag (const struct c_declspecs *declspecs)
{
  /* Thin wrapper: 0 means no diagnostic has been issued yet.  */
  shadow_tag_warned (declspecs, 0);
}
/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
   but no pedwarn.  */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
  bool found_tag = false;
  if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
    {
      tree value = declspecs->type;
      enum tree_code code = TREE_CODE (value);
      if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
        /* Used to test also that TYPE_SIZE (value) != 0.
           That caused warning for `struct foo;' at top level in the file.  */
        {
          tree name = TYPE_NAME (value);
          tree t;
          found_tag = true;
          if (declspecs->restrict_p)
            {
              error ("invalid use of %<restrict%>");
              warned = 1;
            }
          if (name == NULL_TREE)
            {
              if (warned != 1 && code != ENUMERAL_TYPE)
                /* Empty unnamed enum OK */
                {
                  pedwarn (input_location, 0,
                           "unnamed struct/union that defines no instances");
                  warned = 1;
                }
            }
          /* The next three branches diagnose declarations like
             `static struct foo;' or `const struct foo;' which do not
             redeclare the tag; only the first applicable one fires.  */
          else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
                   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
                   && declspecs->storage_class != csc_none)
            {
              if (warned != 1)
                pedwarn (input_location, 0,
                         "empty declaration with storage class specifier "
                         "does not redeclare tag");
              warned = 1;
              pending_xref_error ();
            }
          else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
                   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
                   && (declspecs->const_p
                       || declspecs->volatile_p
                       || declspecs->atomic_p
                       || declspecs->restrict_p
                       || declspecs->address_space))
            {
              if (warned != 1)
                pedwarn (input_location, 0,
                         "empty declaration with type qualifier "
                         "does not redeclare tag");
              warned = 1;
              pending_xref_error ();
            }
          else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
                   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
                   && declspecs->alignas_p)
            {
              if (warned != 1)
                pedwarn (input_location, 0,
                         "empty declaration with %<_Alignas%> "
                         "does not redeclare tag");
              warned = 1;
              pending_xref_error ();
            }
          else
            {
              /* Valid tag reference: push a shadowing forward
                 reference into the current scope if the tag is not
                 already bound here.  */
              pending_invalid_xref = NULL_TREE;
              t = lookup_tag (code, name, true, NULL);
              if (t == NULL_TREE)
                {
                  t = make_node (code);
                  pushtag (input_location, name, t);
                }
            }
        }
      else
        {
          if (warned != 1 && !in_system_header_at (input_location))
            {
              pedwarn (input_location, 0,
                       "useless type name in empty declaration");
              warned = 1;
            }
        }
    }
  else if (warned != 1 && !in_system_header_at (input_location)
           && declspecs->typedef_p)
    {
      pedwarn (input_location, 0, "useless type name in empty declaration");
      warned = 1;
    }
  pending_invalid_xref = NULL_TREE;
  /* Diagnose specifiers that never make sense in an empty
     declaration; errors first, then warnings (warned == 2).  */
  if (declspecs->inline_p)
    {
      error ("%<inline%> in empty declaration");
      warned = 1;
    }
  if (declspecs->noreturn_p)
    {
      error ("%<_Noreturn%> in empty declaration");
      warned = 1;
    }
  if (current_scope == file_scope && declspecs->storage_class == csc_auto)
    {
      error ("%<auto%> in file-scope empty declaration");
      warned = 1;
    }
  if (current_scope == file_scope && declspecs->storage_class == csc_register)
    {
      error ("%<register%> in file-scope empty declaration");
      warned = 1;
    }
  if (!warned && !in_system_header_at (input_location)
      && declspecs->storage_class != csc_none)
    {
      warning (0, "useless storage class specifier in empty declaration");
      warned = 2;
    }
  if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
    {
      warning (0, "useless %qs in empty declaration",
               declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
      warned = 2;
    }
  if (!warned
      && !in_system_header_at (input_location)
      && (declspecs->const_p
          || declspecs->volatile_p
          || declspecs->atomic_p
          || declspecs->restrict_p
          || declspecs->address_space))
    {
      warning (0, "useless type qualifier in empty declaration");
      warned = 2;
    }
  if (!warned && !in_system_header_at (input_location)
      && declspecs->alignas_p)
    {
      warning (0, "useless %<_Alignas%> in empty declaration");
      warned = 2;
    }
  if (found_tag
      && warned == 2
      && (declspecs->typespec_kind == ctsk_tagref_attrs
          || declspecs->typespec_kind == ctsk_tagfirstref_attrs))
    {
      /* Standard attributes after the "struct" or "union" keyword are
         only permitted when the contents of the type are defined, or
         in the form "struct-or-union attribute-specifier-sequence
         identifier;".  If the ';' was not present, attributes were
         diagnosed in the parser.  Here, ensure that any other useless
         elements of the declaration result in a pedwarn, not just a
         warning.  Forward declarations of enum types are not part of
         standard C, but handle them the same.  */
      pedwarn (input_location, 0,
               "invalid use of attributes in empty declaration");
      warned = 1;
    }
  if (warned != 1)
    {
      if (declspecs->declspecs_seen_p
          && !declspecs->non_std_attrs_seen_p)
        /* An attribute declaration (but not a fallthrough attribute
           declaration, which was handled separately); warn if there
           are any attributes being ignored (but not if the attributes
           were empty).  */
        c_warn_unused_attributes (declspecs->attrs);
      else if (!found_tag)
        pedwarn (input_location, 0, "empty declaration");
    }
}
/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
bits. SPECS represents declaration specifiers that the grammar
only permits to contain type qualifiers and attributes. */
int
quals_from_declspecs (const struct c_declspecs *specs)
{
int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
| (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
| (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
| (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
| (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
gcc_assert (!specs->type
&& !specs->decl_attr
&& specs->typespec_word == cts_none
&& specs->storage_class == csc_none
&& !specs->typedef_p
&& !specs->explicit_signed_p
&& !specs->deprecated_p
&& !specs->long_p
&& !specs->long_long_p
&& !specs->short_p
&& !specs->signed_p
&& !specs->unsigned_p
&& !specs->complex_p
&& !specs->inline_p
&& !specs->noreturn_p
&& !specs->thread_p);
return quals;
}
/* Construct an array declarator.  LOC is the location of the
   beginning of the array (usually the opening brace).  EXPR is the
   expression inside [], or NULL_TREE.  QUALS are the type qualifiers
   inside the [] (to be applied to the pointer to which a parameter
   array is converted).  STATIC_P is true if "static" is inside the
   [], false otherwise.  VLA_UNSPEC_P is true if the array is [*], a
   VLA of unspecified length which is nevertheless a complete type,
   false otherwise.  The field for the contained declarator is left to
   be filled in by set_array_declarator_inner.  Returns NULL (after
   an error) if [*] appears outside function prototype scope.  */
struct c_declarator *
build_array_declarator (location_t loc,
                        tree expr, struct c_declspecs *quals, bool static_p,
                        bool vla_unspec_p)
{
  struct c_declarator *declarator = XOBNEW (&parser_obstack,
                                            struct c_declarator);
  declarator->id_loc = loc;
  declarator->kind = cdk_array;
  declarator->declarator = 0;
  declarator->u.array.dimen = expr;
  if (quals)
    {
      declarator->u.array.attrs = quals->attrs;
      declarator->u.array.quals = quals_from_declspecs (quals);
    }
  else
    {
      declarator->u.array.attrs = NULL_TREE;
      declarator->u.array.quals = 0;
    }
  declarator->u.array.static_p = static_p;
  declarator->u.array.vla_unspec_p = vla_unspec_p;
  /* Both "static" and qualifiers in parameter array declarators are
     C99 features; pedwarn for C90.  */
  if (static_p || quals != NULL)
    pedwarn_c90 (loc, OPT_Wpedantic,
                 "ISO C90 does not support %<static%> or type "
                 "qualifiers in parameter array declarators");
  /* Merged the two consecutive `if (vla_unspec_p)' tests: the C90
     pedwarn still precedes the prototype-scope check.  */
  if (vla_unspec_p)
    {
      pedwarn_c90 (loc, OPT_Wpedantic,
                   "ISO C90 does not support %<[*]%> array declarators");
      if (!current_scope->parm_flag)
        {
          /* C99 6.7.5.2p4 */
          error_at (loc, "%<[*]%> not allowed in other than "
                    "function prototype scope");
          declarator->u.array.vla_unspec_p = false;
          return NULL;
        }
      current_scope->had_vla_unspec = true;
    }
  return declarator;
}
/* Set the contained declarator of an array declarator.  DECL is the
   declarator, as constructed by build_array_declarator; INNER is what
   appears on the left of the [].  Returns DECL for chaining.  */
struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
                            struct c_declarator *inner)
{
  decl->declarator = inner;
  return decl;
}
/* INIT is a constructor that forms DECL's initializer.  If the final
   element initializes a flexible array field, add the size of that
   initializer to DECL's size.  */
static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
  tree elt, type;
  if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
    return;
  /* Only the last constructor element can initialize a flexible
     array member.  */
  elt = CONSTRUCTOR_ELTS (init)->last ().value;
  type = TREE_TYPE (elt);
  /* A flexible array member has array type with no size and an
     unbounded domain.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_SIZE (type) == NULL_TREE
      && TYPE_DOMAIN (type) != NULL_TREE
      && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
    {
      /* Complete the array type from the initializer, then grow both
         the bit size and the byte size of DECL by the array's size.  */
      complete_array_type (&type, elt, false);
      DECL_SIZE (decl)
        = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
      DECL_SIZE_UNIT (decl)
        = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type));
    }
}
/* Decode a "typename", such as "int **", returning a ..._TYPE node.
   Set *EXPR, if EXPR not NULL, to any expression to be evaluated
   before the type name, and set *EXPR_CONST_OPERANDS, if
   EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
   appear in a constant expression.  */
tree
groktypename (struct c_type_name *type_name, tree *expr,
              bool *expr_const_operands)
{
  tree type;
  tree attrs = type_name->specs->attrs;
  /* Detach the attributes so grokdeclarator does not apply them; they
     are applied below after filtering.  */
  type_name->specs->attrs = NULL_TREE;
  type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME,
                         false, NULL, &attrs, expr, expr_const_operands,
                         DEPRECATED_NORMAL);
  /* Apply attributes.  */
  attrs = c_warn_type_attributes (attrs);
  decl_attributes (&type, attrs, 0);
  return type;
}
/* Looks up the most recent pushed declaration corresponding to DECL:
   first by ordinary visible lookup, then falling back to the external
   scope (for e.g. builtins bound invisibly there).  */
static tree
lookup_last_decl (tree decl)
{
  tree name = DECL_NAME (decl);
  if (tree found = lookup_name (name))
    return found;
  return lookup_name_in_scope (name, external_scope);
}
/* Wrapper for decl_attributes that adds some implicit attributes
   to VAR_DECLs or FUNCTION_DECLs.  */
static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
  /* Add implicit "omp declare target" attribute if requested.  Only
     global variables and functions can be OpenMP declare-target.  */
  if (current_omp_declare_target_attribute
      && ((VAR_P (*node) && is_global_var (*node))
          || TREE_CODE (*node) == FUNCTION_DECL))
    {
      /* A variable whose type is not OMP-mappable gets the "implicit"
         form so the error can be deferred until the decl is used.  */
      if (VAR_P (*node)
          && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
        attributes = tree_cons (get_identifier ("omp declare target implicit"),
                                NULL_TREE, attributes);
      else
        {
          attributes = tree_cons (get_identifier ("omp declare target"),
                                  NULL_TREE, attributes);
          attributes = tree_cons (get_identifier ("omp declare target block"),
                                  NULL_TREE, attributes);
        }
    }
  /* Look up the current declaration with all the attributes merged
     so far so that attributes on the current declaration that's
     about to be pushed that conflict with the former can be detected,
     diagnosed, and rejected as appropriate.  */
  tree last_decl = lookup_last_decl (*node);
  return decl_attributes (node, attributes, flags, last_decl);
}
/* Decode a declarator in an ordinary declaration or data definition.
   This is called as soon as the type information and variable name
   have been parsed, before parsing the initializer if any.
   Here we create the ..._DECL node, fill in its type,
   and put it on the list of decls for the current context.
   When nonnull, set *LASTLOC to the location of the prior declaration
   of the same entity if one exists.
   The ..._DECL node is returned as the value.
   Exception: for arrays where the length is not specified,
   the type is left null, to be filled in by `finish_decl'.
   Function definitions do not come here; they go to start_function
   instead.  However, external and forward declarations of functions
   do go through here.  Structure field declarations are done by
   grokfield and not through here.  */
tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
            bool initialized, tree attributes, location_t *lastloc /* = NULL */)
{
  tree decl;
  tree tem;
  tree expr = NULL_TREE;
  enum deprecated_states deprecated_state = DEPRECATED_NORMAL;
  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;
  decl = grokdeclarator (declarator, declspecs,
                         NORMAL, initialized, NULL, &attributes, &expr, NULL,
                         deprecated_state);
  if (!decl || decl == error_mark_node)
    return NULL_TREE;
  /* Report the prior declaration's location to the caller, if any.  */
  if (tree lastdecl = lastloc ? lookup_last_decl (decl) : NULL_TREE)
    if (lastdecl != error_mark_node)
      *lastloc = DECL_SOURCE_LOCATION (lastdecl);
  /* Any expression grokdeclarator says must be evaluated before the
     declaration (e.g. a VLA size) is emitted here.  */
  if (expr)
    add_stmt (fold_convert (void_type_node, expr));
  if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))
      && TREE_PUBLIC (decl))
    warning (OPT_Wmain, "%q+D is usually a function", decl);
  if (initialized)
    /* Is it valid for this decl to have an initializer at all?
       If not, set INITIALIZED to zero, which will indirectly
       tell 'finish_decl' to ignore the initializer once it is parsed.  */
    switch (TREE_CODE (decl))
      {
      case TYPE_DECL:
        error ("typedef %qD is initialized (use %<__typeof__%> instead)", decl);
        initialized = false;
        break;
      case FUNCTION_DECL:
        error ("function %qD is initialized like a variable", decl);
        initialized = false;
        break;
      case PARM_DECL:
        /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE.  */
        error ("parameter %qD is initialized", decl);
        initialized = false;
        break;
      default:
        /* Don't allow initializations for incomplete types except for
           arrays which might be completed by the initialization.  */
        /* This can happen if the array size is an undefined macro.
           We already gave a warning, so we don't need another one.  */
        if (TREE_TYPE (decl) == error_mark_node)
          initialized = false;
        else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
          {
            /* A complete type is ok if size is fixed.  */
            if (!poly_int_tree_p (TYPE_SIZE (TREE_TYPE (decl)))
                || C_DECL_VARIABLE_SIZE (decl))
              {
                error ("variable-sized object may not be initialized");
                initialized = false;
              }
          }
        else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
          {
            error ("variable %qD has initializer but incomplete type", decl);
            initialized = false;
          }
        else if (C_DECL_VARIABLE_SIZE (decl))
          {
            /* Although C99 is unclear about whether incomplete arrays
               of VLAs themselves count as VLAs, it does not make
               sense to permit them to be initialized given that
               ordinary VLAs may not be initialized.  */
            error ("variable-sized object may not be initialized");
            initialized = false;
          }
      }
  if (initialized)
    {
      if (current_scope == file_scope)
        TREE_STATIC (decl) = 1;
      /* Tell 'pushdecl' this is an initialized decl
         even though we don't yet have the initializer expression.
         Also tell 'finish_decl' it may store the real initializer.  */
      DECL_INITIAL (decl) = error_mark_node;
    }
  /* If this is a function declaration, write a record describing it to the
     prototypes file (if requested).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));
  /* ANSI specifies that a tentative definition which is not merged with
     a non-tentative definition behaves exactly like a definition with an
     initializer equal to zero.  (Section 3.7.2)
     -fno-common gives strict ANSI behavior, though this tends to break
     a large body of code that grew up without this rule.
     Thread-local variables are never common, since there's no entrenched
     body of code to break, and it allows more efficient variable references
     in the presence of dynamic linking.  */
  if (VAR_P (decl)
      && !initialized
      && TREE_PUBLIC (decl)
      && !DECL_THREAD_LOCAL_P (decl)
      && !flag_no_common)
    DECL_COMMON (decl) = 1;
  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  c_decl_attributes (&decl, attributes, 0);
  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
          || current_function_decl))
    {
      /* Block-scope `auto inline' keeps its DECL_EXTERNAL setting;
         otherwise non-static gnu_inline flips DECL_EXTERNAL to get
         gnu89 inline semantics.  */
      if (declspecs->storage_class == csc_auto && current_scope != file_scope)
        ;
      else if (declspecs->storage_class != csc_static)
        DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
    }
  if (TREE_CODE (decl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (decl)))
    {
      struct c_declarator *ce = declarator;
      if (ce->kind == cdk_pointer)
        ce = declarator->declarator;
      if (ce->kind == cdk_function)
        {
          /* Record the promoted argument types for narrow integer
             parameters, per the target's calling convention.  */
          tree args = ce->u.arg_info->parms;
          for (; args; args = DECL_CHAIN (args))
            {
              tree type = TREE_TYPE (args);
              if (type && INTEGRAL_TYPE_P (type)
                  && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
                DECL_ARG_TYPE (args) = c_type_promotes_to (type);
            }
        }
    }
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning (OPT_Wattributes, "inline function %q+D given attribute %qs",
             decl, "noinline");
  /* C99 6.7.4p3: An inline definition of a function with external
     linkage shall not contain a definition of a modifiable object
     with static storage duration...  */
  if (VAR_P (decl)
      && current_scope != file_scope
      && TREE_STATIC (decl)
      && !TREE_READONLY (decl)
      && DECL_DECLARED_INLINE_P (current_function_decl)
      && DECL_EXTERNAL (current_function_decl))
    record_inline_static (input_location, current_function_decl,
                          decl, csi_modifiable);
  if (c_dialect_objc ()
      && VAR_OR_FUNCTION_DECL_P (decl))
    objc_check_global_decl (decl);
  /* Add this decl to the current scope.
     TEM may equal DECL or it may be a previous decl of the same name.  */
  tem = pushdecl (decl);
  if (initialized && DECL_EXTERNAL (tem))
    {
      DECL_EXTERNAL (tem) = 0;
      TREE_STATIC (tem) = 1;
    }
  return tem;
}
/* Subroutine of finish_decl.  TYPE is the type of an uninitialized object
   DECL or the non-array element type if DECL is an uninitialized array.
   For C++ compatibility, recursively diagnose any const-qualified member
   (directly or within a nested struct/union member) that would be left
   uninitialized.  */

static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
  for (tree member = TYPE_FIELDS (type); member; member = TREE_CHAIN (member))
    {
      if (TREE_CODE (member) != FIELD_DECL)
	continue;

      /* For array members, look at the element type.  */
      tree member_type = strip_array_types (TREE_TYPE (member));
      if (TYPE_QUALS (member_type) & TYPE_QUAL_CONST)
	{
	  warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		      "uninitialized const member in %qT is invalid in C++",
		      strip_array_types (TREE_TYPE (decl)));
	  inform (DECL_SOURCE_LOCATION (member), "%qD should be initialized",
		  member);
	}

      /* Recurse into aggregate members to find nested const fields.  */
      if (RECORD_OR_UNION_TYPE_P (member_type))
	diagnose_uninitialized_cst_member (decl, member_type);
    }
}
/* Finish processing of a declaration;
   install its initial value.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.

   INIT_LOC is the location of the initial value.

   ASMSPEC_TREE, if non-null, is a STRING_CST giving the assembler name
   to use for the object.  */

void
finish_decl (tree decl, location_t init_loc, tree init,
	     tree origtype, tree asmspec_tree)
{
  tree type;
  /* Remember whether the size was unknown on entry; used near the end to
     re-lay-out local arrays whose type was completed by the initializer.  */
  bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
  const char *asmspec = 0;

  /* If a name was specified, get the string.  */
  if (VAR_OR_FUNCTION_DECL_P (decl)
      && DECL_FILE_SCOPE_P (decl))
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (VAR_P (decl)
      && TREE_STATIC (decl)
      && global_bindings_p ())
    /* So decl is a global variable.  Record the types it uses
       so that we can decide later to emit debug info for them.  */
    record_types_used_by_current_var_decl (decl);

  /* If `start_decl' didn't like having an initialization, ignore it now.  */
  if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
    init = NULL_TREE;

  /* Don't crash if parm is initialized.  */
  if (TREE_CODE (decl) == PARM_DECL)
    init = NULL_TREE;

  if (init)
    store_init_value (init_loc, decl, init, origtype);

  if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl)
			    || TREE_CODE (decl) == FIELD_DECL))
    objc_check_decl (decl);

  type = TREE_TYPE (decl);

  /* Deduce size of array from initialization, if not already known.
     This is only needed for an initialization in the current scope;
     it must not be done for a file-scope initialization of a
     declaration with external linkage, redeclared in an inner scope
     with the outer declaration shadowed in an intermediate scope.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL
      && !(TREE_PUBLIC (decl) && current_scope != file_scope))
    {
      bool do_default
	= (TREE_STATIC (decl)
	   /* Even if pedantic, an external linkage array
	      may have incomplete type at first.  */
	   ? pedantic && !TREE_PUBLIC (decl)
	   : !DECL_EXTERNAL (decl));
      int failure
	= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
			       do_default);

      /* Get the completed type made by complete_array_type.  */
      type = TREE_TYPE (decl);

      /* FAILURE encodes why the array type could not be completed;
	 0 means success.  */
      switch (failure)
	{
	case 1:
	  error ("initializer fails to determine size of %q+D", decl);
	  break;

	case 2:
	  if (do_default)
	    error ("array size missing in %q+D", decl);
	  break;

	case 3:
	  error ("zero or negative size array %q+D", decl);
	  break;

	case 0:
	  /* For global variables, update the copy of the type that
	     exists in the binding.  */
	  if (TREE_PUBLIC (decl))
	    {
	      struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
	      while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
		b_ext = b_ext->shadowed;
	      if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl))
		{
		  if (b_ext->u.type && comptypes (b_ext->u.type, type))
		    b_ext->u.type = composite_type (b_ext->u.type, type);
		  else
		    b_ext->u.type = type;
		}
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (DECL_INITIAL (decl))
	TREE_TYPE (DECL_INITIAL (decl)) = type;

      relayout_decl (decl);
    }

  /* Look for braced array initializers for character arrays and
     recursively convert them into STRING_CSTs.  */
  if (tree init = DECL_INITIAL (decl))
    DECL_INITIAL (decl) = braced_lists_to_strings (type, init);

  if (VAR_P (decl))
    {
      if (init && TREE_CODE (init) == CONSTRUCTOR)
	add_flexible_array_elts_to_size (decl, init);

      complete_flexible_array_elts (DECL_INITIAL (decl));

      if (is_global_var (decl))
	{
	  /* Check the type is valid for static or thread-local storage
	     on this target (e.g. for targets that restrict certain
	     types to automatic storage).  */
	  type_context_kind context = (DECL_THREAD_LOCAL_P (decl)
				       ? TCTX_THREAD_STORAGE
				       : TCTX_STATIC_STORAGE);
	  if (!verify_type_context (input_location, context, TREE_TYPE (decl)))
	    TREE_TYPE (decl) = error_mark_node;
	}

      if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (decl)))
	layout_decl (decl, 0);

      if (DECL_SIZE (decl) == NULL_TREE
	  /* Don't give an error if we already gave one earlier.  */
	  && TREE_TYPE (decl) != error_mark_node
	  && (TREE_STATIC (decl)
	      /* A static variable with an incomplete type
		 is an error if it is initialized.
		 Also if it is not file scope.
		 Otherwise, let it through, but if it is not `extern'
		 then it may cause an error message later.  */
	      ? (DECL_INITIAL (decl) != NULL_TREE
		 || !DECL_FILE_SCOPE_P (decl))
	      /* An automatic variable with an incomplete type
		 is an error.  */
	      : !DECL_EXTERNAL (decl)))
	{
	  error ("storage size of %q+D isn%'t known", decl);
	  TREE_TYPE (decl) = error_mark_node;
	}

      if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
	   || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
	  && DECL_SIZE (decl) == NULL_TREE
	  && TREE_STATIC (decl))
	incomplete_record_decls.safe_push (decl);

      if (is_global_var (decl)
	  && DECL_SIZE (decl) != NULL_TREE
	  && TREE_TYPE (decl) != error_mark_node)
	{
	  if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	    constant_expression_warning (DECL_SIZE (decl));
	  else
	    {
	      error ("storage size of %q+D isn%'t constant", decl);
	      TREE_TYPE (decl) = error_mark_node;
	    }
	}

      /* A use of a typedef name marked with __attribute__((used))
	 marks the declared object as used as well.  */
      if (TREE_USED (type))
	{
	  TREE_USED (decl) = 1;
	  DECL_READ_P (decl) = 1;
	}
    }

  /* If this is a function and an assembler name is specified, reset DECL_RTL
     so we can give it its new name.  Also, update builtin_decl if it
     was a normal built-in.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
    {
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	set_builtin_user_assembler_name (decl, asmspec);
      set_user_assembler_name (decl, asmspec);
    }

  /* If #pragma weak was used, mark the decl weak now.  */
  maybe_apply_pragma_weak (decl);

  /* Output the assembler code and/or RTL code for variables and functions,
     unless the type is an undefined structure or union.
     If not, it will get done when the type is completed.  */

  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      /* Determine the ELF visibility.  */
      if (TREE_PUBLIC (decl))
	c_determine_visibility (decl);

      /* This is a no-op in c-lang.c or something real in objc-act.c.  */
      if (c_dialect_objc ())
	objc_check_decl (decl);

      if (asmspec)
	{
	  /* If this is not a static variable, issue a warning.
	     It doesn't make any sense to give an ASMSPEC for an
	     ordinary, non-register local variable.  Historically,
	     GCC has accepted -- but ignored -- the ASMSPEC in
	     this case.  */
	  if (!DECL_FILE_SCOPE_P (decl)
	      && VAR_P (decl)
	      && !C_DECL_REGISTER (decl)
	      && !TREE_STATIC (decl))
	    warning (0, "ignoring %<asm%> specifier for non-static local "
		     "variable %q+D", decl);
	  else
	    set_user_assembler_name (decl, asmspec);
	}

      if (DECL_FILE_SCOPE_P (decl))
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    /* Don't output anything
	       when a tentative file-scope definition is seen.
	       But at end of compilation, do output code for them.  */
	    DECL_DEFER_OUTPUT (decl) = 1;
	  if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl))
	    DECL_HARD_REGISTER (decl) = 1;
	  rest_of_decl_compilation (decl, true, 0);

	  if (TREE_CODE (decl) == FUNCTION_DECL)
	    {
	      tree parms = DECL_ARGUMENTS (decl);
	      const bool builtin = fndecl_built_in_p (decl);
	      /* Synthesize an "access" attribute describing array/VLA
		 parameter bounds (skipping this for built-ins).  */
	      if (tree access = build_attr_access_from_parms (parms, !builtin))
		decl_attributes (&decl, access, 0);
	    }
	}
      else
	{
	  /* In conjunction with an ASMSPEC, the `register'
	     keyword indicates that we should place the variable
	     in a particular register.  */
	  if (asmspec && C_DECL_REGISTER (decl))
	    {
	      DECL_HARD_REGISTER (decl) = 1;
	      /* This cannot be done for a structure with volatile
		 fields, on which DECL_REGISTER will have been
		 reset.  */
	      if (!DECL_REGISTER (decl))
		error ("cannot put object with volatile field into register");
	    }

	  if (TREE_CODE (decl) != FUNCTION_DECL)
	    {
	      /* If we're building a variable sized type, and we might be
		 reachable other than via the top of the current binding
		 level, then create a new BIND_EXPR so that we deallocate
		 the object at the right time.  */
	      /* Note that DECL_SIZE can be null due to errors.  */
	      if (DECL_SIZE (decl)
		  && !TREE_CONSTANT (DECL_SIZE (decl))
		  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
		{
		  tree bind;
		  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  add_stmt (bind);
		  BIND_EXPR_BODY (bind) = push_stmt_list ();
		}
	      add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
				    DECL_EXPR, decl));
	    }
	}

      if (!DECL_FILE_SCOPE_P (decl))
	{
	  /* Recompute the RTL of a local array now
	     if it used to be an incomplete type.  */
	  if (was_incomplete && !is_global_var (decl))
	    {
	      /* If we used it already as memory, it must stay in memory.  */
	      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
	      /* If it's still incomplete now, no init will save it.  */
	      if (DECL_SIZE (decl) == NULL_TREE)
		DECL_INITIAL (decl) = NULL_TREE;
	    }
	}
    }

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      /* A typedef of a variably modified type needs a DECL_EXPR so
	 the size expressions are evaluated at the point of declaration.  */
      if (!DECL_FILE_SCOPE_P (decl)
	  && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
	add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));

      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
    }

  /* Install a cleanup (aka destructor) if one was given.  */
  if (VAR_P (decl) && !TREE_STATIC (decl))
    {
      tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
      if (attr)
	{
	  tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
	  tree cleanup_decl = lookup_name (cleanup_id);
	  tree cleanup;
	  vec<tree, va_gc> *v;

	  /* Build "cleanup(&decl)" for the destructor.  */
	  cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false);
	  vec_alloc (v, 1);
	  v->quick_push (cleanup);
	  cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
					       vNULL, cleanup_decl, v, NULL);
	  vec_free (v);

	  /* Don't warn about decl unused; the cleanup uses it.  */
	  TREE_USED (decl) = 1;
	  TREE_USED (cleanup_decl) = 1;
	  DECL_READ_P (decl) = 1;

	  push_cleanup (decl, cleanup, false);
	}
    }

  if (warn_cxx_compat
      && VAR_P (decl)
      && !DECL_EXTERNAL (decl)
      && DECL_INITIAL (decl) == NULL_TREE)
    {
      type = strip_array_types (type);
      if (TREE_READONLY (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "uninitialized %<const %D%> is invalid in C++", decl);
      else if (RECORD_OR_UNION_TYPE_P (type)
	       && C_TYPE_FIELDS_READONLY (type))
	diagnose_uninitialized_cst_member (decl, type);
    }

  if (flag_openmp
      && VAR_P (decl)
      && lookup_attribute ("omp declare target implicit",
			   DECL_ATTRIBUTES (decl)))
    {
      /* Replace the implicit marker with the real "omp declare target"
	 attribute, provided the type is mappable.  */
      DECL_ATTRIBUTES (decl)
	= remove_attribute ("omp declare target implicit",
			    DECL_ATTRIBUTES (decl));
      if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl)))
	error ("%q+D in declare target directive does not have mappable type",
	       decl);
      else if (!lookup_attribute ("omp declare target",
				  DECL_ATTRIBUTES (decl))
	       && !lookup_attribute ("omp declare target link",
				     DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"),
		       NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  /* This is the last point we can lower alignment so give the target the
     chance to do so.  */
  if (VAR_P (decl)
      && !is_global_var (decl)
      && !DECL_HARD_REGISTER (decl))
    targetm.lower_local_decl_alignment (decl);

  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL.
   EXPR is NULL or a pointer to an expression that needs to be
   evaluated for the side effects of array size expressions in the
   parameters.  Returns the new PARM_DECL.  */

tree
grokparm (const struct c_parm *parm, tree *expr)
{
  tree parm_attrs = parm->attrs;
  tree parm_decl = grokdeclarator (parm->declarator, parm->specs, PARM,
				   false, NULL, &parm_attrs, expr, NULL,
				   DEPRECATED_NORMAL);

  decl_attributes (&parm_decl, parm_attrs, 0);

  return parm_decl;
}
/* Return attribute "arg spec" corresponding to an array/VLA parameter
   described by PARM, concatenated onto attributes ATTRS.
   The spec consists of one dollar symbol for each specified variable
   bound, one asterisk for each unspecified variable bound, followed
   by at most one specification of the most significant bound of
   an ordinary array parameter.  For ordinary arrays the specification
   is either the constant bound itself, or the space character for
   an array with an unspecified bound (the [] form).  Finally, a chain
   of specified variable bounds is appended to the spec, starting with
   the most significant bound.  For example, the PARM T a[2][m][3][n]
   will produce __attribute__((arg spec ("[$$2]", m, n)).
   For T a typedef for an array with variable bounds, the bounds are
   included in the specification in the expected order.
   No "arg spec" is created for parameters of pointer types, making
   a distinction between T(*)[N] (or, equivalently, T[][N]) and
   the T[M][N] form, all of which have the same type and are represented
   the same, but only the last of which gets an "arg spec" describing
   the most significant bound M.  */

static tree
get_parm_array_spec (const struct c_parm *parm, tree attrs)
{
  /* The attribute specification string, minor bound first.  */
  std::string spec;

  /* A list of VLA variable bounds, major first, or null if unspecified
     or not a VLA.  */
  tree vbchain = NULL_TREE;
  /* True for a pointer parameter.  */
  bool pointer = false;
  /* True for an ordinary array with an unspecified bound.  */
  bool nobound = false;

  /* Create a string representation for the bounds of the array/VLA.
     The declarator chain is walked from the least significant (inner)
     declarator outward; NEXT skips over any attribute declarators.  */
  for (c_declarator *pd = parm->declarator, *next; pd; pd = next)
    {
      next = pd->declarator;
      while (next && next->kind == cdk_attrs)
	next = next->declarator;

      /* Remember if a pointer has been seen to avoid storing the constant
	 bound.  */
      if (pd->kind == cdk_pointer)
	pointer = true;

      if ((pd->kind == cdk_pointer || pd->kind == cdk_function)
	  && (!next || next->kind == cdk_id))
	{
	  /* Do nothing for the common case of a pointer.  The fact that
	     the parameter is one can be deduced from the absence of
	     an arg spec for it.  */
	  return attrs;
	}

      if (pd->kind == cdk_id)
	{
	  /* Innermost declarator: the bounds, if any, now come from the
	     declared type itself (e.g. a typedef for an array type).  */
	  if (pointer
	      || !parm->specs->type
	      || TREE_CODE (parm->specs->type) != ARRAY_TYPE
	      || !TYPE_DOMAIN (parm->specs->type)
	      || !TYPE_MAX_VALUE (TYPE_DOMAIN (parm->specs->type)))
	    continue;

	  tree max = TYPE_MAX_VALUE (TYPE_DOMAIN (parm->specs->type));
	  if (!vbchain
	      && TREE_CODE (max) == INTEGER_CST)
	    {
	      /* Extract the upper bound from a parameter of an array type
		 unless the parameter is an ordinary array of unspecified
		 bound in which case a next iteration of the loop will
		 exit.  */
	      if (spec.empty () || spec.end ()[-1] != ' ')
		{
		  if (!tree_fits_shwi_p (max))
		    continue;

		  /* The upper bound is the value of the largest valid
		     index.  */
		  HOST_WIDE_INT n = tree_to_shwi (max) + 1;
		  char buf[40];
		  /* NOTE(review): the (unsigned long) cast truncates a
		     64-bit HOST_WIDE_INT on LLP64 hosts; the constant-
		     bound case below uses %llu instead -- confirm this
		     asymmetry is intentional.  */
		  sprintf (buf, "%lu", (unsigned long)n);
		  spec += buf;
		}
	      continue;
	    }

	  /* For a VLA typedef, create a list of its variable bounds and
	     append it in the expected order to VBCHAIN.  */
	  tree tpbnds = NULL_TREE;
	  for (tree type = parm->specs->type; TREE_CODE (type) == ARRAY_TYPE;
	       type = TREE_TYPE (type))
	    {
	      tree nelts = array_type_nelts (type);
	      if (TREE_CODE (nelts) != INTEGER_CST)
		{
		  /* Each variable VLA bound is represented by the dollar
		     sign.  */
		  spec += "$";
		  tpbnds = tree_cons (NULL_TREE, nelts, tpbnds);
		}
	    }
	  tpbnds = nreverse (tpbnds);
	  vbchain = chainon (vbchain, tpbnds);
	  continue;
	}

      if (pd->kind != cdk_array)
	continue;

      if (pd->u.array.vla_unspec_p)
	{
	  /* Each unspecified bound is represented by a star.  There
	     can be any number of these in a declaration (but none in
	     a definition).  */
	  spec += '*';
	  continue;
	}

      tree nelts = pd->u.array.dimen;
      if (!nelts)
	{
	  /* Ordinary array of unspecified size.  There can be at most
	     one for the most significant bound.  Exit on the next
	     iteration which determines whether or not PARM is declared
	     as a pointer or an array.  */
	  nobound = true;
	  continue;
	}

      if (TREE_CODE (nelts) == INTEGER_CST)
	{
	  /* Skip all constant bounds except the most significant one.
	     The interior ones are included in the array type.  */
	  if (next && (next->kind == cdk_array || next->kind == cdk_pointer))
	    continue;

	  if (!tree_fits_uhwi_p (nelts))
	    /* Bail completely on invalid bounds.  */
	    return attrs;

	  char buf[40];
	  /* Prefix the bound with "s" for a [static N] bound.  */
	  const char *code = pd->u.array.static_p ? "s" : "";
	  unsigned HOST_WIDE_INT n = tree_to_uhwi (nelts);
	  sprintf (buf, "%s%llu", code, (unsigned long long)n);
	  spec += buf;
	  break;
	}

      /* Each variable VLA bound is represented by a dollar sign.  */
      spec += "$";
      vbchain = tree_cons (NULL_TREE, nelts, vbchain);
    }

  /* Nothing to record: neither bounds nor an unspecified-bound array.  */
  if (spec.empty () && !nobound)
    return attrs;

  spec.insert (0, "[");
  if (nobound)
    /* Ordinary array of unspecified bound is represented by a space.
       It must be last in the spec.  */
    spec += ' ';
  spec += ']';

  /* Package the spec string and the chain of variable bounds as the
     arguments of an internal "arg spec" attribute.  */
  tree acsstr = build_string (spec.length () + 1, spec.c_str ());
  tree args = tree_cons (NULL_TREE, acsstr, vbchain);
  tree name = get_identifier ("arg spec");

  return tree_cons (name, args, attrs);
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL
and push that on the current scope. EXPR is a pointer to an
expression that needs to be evaluated for the side effects of array
size expressions in the parameters. */
void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
tree attrs = parm->attrs;
tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL,
&attrs, expr, NULL, DEPRECATED_NORMAL);
if (decl && DECL_P (decl))
DECL_SOURCE_LOCATION (decl) = parm->loc;
attrs = get_parm_array_spec (parm, attrs);
decl_attributes (&decl, attrs, 0);
decl = pushdecl (decl);
finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}
/* Mark all the parameter declarations to date as forward decls.
   Also diagnose use of this extension.  */

void
mark_forward_parm_decls (void)
{
  /* Diagnose the extension once per scope under -pedantic.  */
  if (pedantic && !current_scope->warned_forward_parm_decls)
    {
      current_scope->warned_forward_parm_decls = true;
      pedwarn (input_location, OPT_Wpedantic,
	       "ISO C forbids forward parameter declarations");
    }

  /* Flag every parameter binding seen so far as a forward declaration.  */
  for (struct c_binding *binding = current_scope->bindings;
       binding; binding = binding->prev)
    if (TREE_CODE (binding->decl) == PARM_DECL)
      TREE_ASM_WRITTEN (binding->decl) = 1;
}
/* Build a COMPOUND_LITERAL_EXPR.  TYPE is the type given in the compound
   literal, which may be an incomplete array type completed by the
   initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
   literal.  NON_CONST is true if the initializers contain something
   that cannot occur in a constant expression.  If ALIGNAS_ALIGN is nonzero,
   it is the (valid) alignment for this compound literal, as specified
   with _Alignas.

   Returns the COMPOUND_LITERAL_EXPR (possibly wrapped in a
   C_MAYBE_CONST_EXPR), or error_mark_node on failure.  */

tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const,
			unsigned int alignas_align)
{
  /* We do not use start_decl here because we have a type, not a declarator;
     and do not use finish_decl because the decl should be stored inside
     the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR.  */
  tree decl;
  tree complit;
  tree stmt;

  if (type == error_mark_node
      || init == error_mark_node)
    return error_mark_node;

  /* Build an anonymous VAR_DECL to hold the literal's value.  */
  decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  /* A compound literal at file scope has static storage duration.  */
  TREE_STATIC (decl) = (current_scope == file_scope);
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  DECL_READ_P (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 1;
  C_DECL_COMPOUND_LITERAL_P (decl) = 1;
  TREE_TYPE (decl) = type;
  c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl);
  if (alignas_align)
    {
      SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
      DECL_USER_ALIGN (decl) = 1;
    }
  store_init_value (loc, decl, init, NULL_TREE);

  /* Complete an incomplete array type ([] form) from the initializer.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
    {
      int failure = complete_array_type (&TREE_TYPE (decl),
					 DECL_INITIAL (decl), true);
      /* If complete_array_type returns 3, it means that the
	 initial value of the compound literal is empty.  Allow it.  */
      gcc_assert (failure == 0 || failure == 3);

      type = TREE_TYPE (decl);
      TREE_TYPE (DECL_INITIAL (decl)) = type;
    }

  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (loc, NULL_TREE, type);
      return error_mark_node;
    }

  /* Check the type is valid for static storage on this target.  */
  if (TREE_STATIC (decl)
      && !verify_type_context (loc, TCTX_STATIC_STORAGE, type))
    return error_mark_node;

  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
  TREE_SIDE_EFFECTS (complit) = 1;

  layout_decl (decl, 0);

  if (TREE_STATIC (decl))
    {
      /* This decl needs a name for the assembler output.  */
      set_compound_literal_name (decl);
      DECL_DEFER_OUTPUT (decl) = 1;
      DECL_COMDAT (decl) = 1;
      pushdecl (decl);
      rest_of_decl_compilation (decl, 1, 0);
    }
  else if (current_function_decl && !current_scope->parm_flag)
    pushdecl (decl);

  /* Wrap the result to record non-constant-expression status when the
     initializer contains something invalid in a constant expression.  */
  if (non_const)
    {
      complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
      C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
    }

  return complit;
}
/* Check the type of a compound literal.  Here we just check that it
   is valid for C++.  */

void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
  if (!warn_cxx_compat)
    return;

  /* C++ does not allow a new type to be defined inside a compound
     literal's type name; warn about any tag defined or first seen here.  */
  switch (type_name->specs->typespec_kind)
    {
    case ctsk_tagdef:
    case ctsk_tagfirstref:
    case ctsk_tagfirstref_attrs:
      warning_at (loc, OPT_Wc___compat,
		  "defining a type in a compound literal is invalid in C++");
      break;

    default:
      break;
    }
}
/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
   replacing with appropriate values if they are invalid.
   LOC is the location to use for diagnostics.  ORIG_NAME is the
   identifier of the field, or NULL_TREE for an unnamed bit-field.
   On return *TYPE and *WIDTH are always usable (possibly replaced by
   unsigned_type_node / integer_one_node after an error).  */

static void
check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
			       tree orig_name)
{
  tree type_mv;
  unsigned int max_width;
  unsigned HOST_WIDE_INT w;
  const char *name = (orig_name
		      ? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
		      : _("<anonymous>"));

  /* Detect and ignore out of range field width and process valid
     field widths.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
    {
      error_at (loc, "bit-field %qs width not an integer constant", name);
      /* Recover with a width of one.  */
      *width = integer_one_node;
    }
  else
    {
      /* Try folding a width that is constant but not an integer
	 constant expression; accept it with a pedwarn.  */
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  *width = c_fully_fold (*width, false, NULL);
	  if (TREE_CODE (*width) == INTEGER_CST)
	    pedwarn (loc, OPT_Wpedantic,
		     "bit-field %qs width not an integer constant expression",
		     name);
	}
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  error_at (loc, "bit-field %qs width not an integer constant", name);
	  *width = integer_one_node;
	}
      constant_expression_warning (*width);
      if (tree_int_cst_sgn (*width) < 0)
	{
	  error_at (loc, "negative width in bit-field %qs", name);
	  *width = integer_one_node;
	}
      /* Zero width is only valid for unnamed bit-fields.  */
      else if (integer_zerop (*width) && orig_name)
	{
	  error_at (loc, "zero width for bit-field %qs", name);
	  *width = integer_one_node;
	}
    }

  /* Detect invalid bit-field type.  */
  if (TREE_CODE (*type) != INTEGER_TYPE
      && TREE_CODE (*type) != BOOLEAN_TYPE
      && TREE_CODE (*type) != ENUMERAL_TYPE)
    {
      error_at (loc, "bit-field %qs has invalid type", name);
      *type = unsigned_type_node;
    }

  if (TYPE_WARN_IF_NOT_ALIGN (*type))
    {
      error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type",
		name);
      *type = unsigned_type_node;
    }

  /* C90 only allows int, unsigned int and (implementation-defined)
     _Bool-like main variants as bit-field types; anything else is a
     GCC extension.  */
  type_mv = TYPE_MAIN_VARIANT (*type);
  if (!in_system_header_at (input_location)
      && type_mv != integer_type_node
      && type_mv != unsigned_type_node
      && type_mv != boolean_type_node)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "type of bit-field %qs is a GCC extension", name);

  max_width = TYPE_PRECISION (*type);

  /* Clamp a width that exceeds the precision of the type.  */
  if (compare_tree_int (*width, max_width) > 0)
    {
      error_at (loc, "width of %qs exceeds its type", name);
      w = max_width;
      *width = build_int_cst (integer_type_node, w);
    }
  else
    w = tree_to_uhwi (*width);

  /* Warn when an enum bit-field cannot represent all enumerators.  */
  if (TREE_CODE (*type) == ENUMERAL_TYPE)
    {
      struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
      if (!lt
	  || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
	  || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
	warning_at (loc, 0, "%qs is narrower than values of its type", name);
    }
}
/* Print warning about variable length array if necessary.  SIZE is the
   (possibly folded-to-constant) size expression; NAME is the array's
   identifier or NULL_TREE.  A TREE_CONSTANT size that still required a
   VLA (i.e. was not an integer constant expression) gets the "cannot be
   evaluated" wording.  */

static void
warn_variable_length_array (tree name, tree size)
{
  if (name)
    {
      if (TREE_CONSTANT (size))
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids array %qE whose size "
		     "cannot be evaluated", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids variable length array %qE", name);
    }
  else
    {
      if (TREE_CONSTANT (size))
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
		     "whose size cannot be evaluated");
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
		     "length array");
    }
}
/* Print warning about defaulting to int if necessary.  LOCATION is the
   diagnostic location, OPT the controlling warning option, and GMSGID a
   printf-style untranslated message followed by its arguments.  */

static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
  va_list args;
  rich_location richloc (line_table, location);

  /* Implicit int is a constraint violation in C99 and later, so promote
     the diagnostic to a pedwarn there; otherwise it is a plain warning.  */
  diagnostic_t kind = flag_isoc99 ? DK_PEDWARN : DK_WARNING;

  va_start (args, gmsgid);
  diagnostic_info diagnostic;
  diagnostic_set_info (&diagnostic, gmsgid, &args, &richloc, kind);
  diagnostic.option_index = opt;
  diagnostic_report_diagnostic (global_dc, &diagnostic);
  va_end (args);
}
/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
   considering only those c_declspec_words found in LIST, which
   must be terminated by cdw_number_of_elements.  Returns
   UNKNOWN_LOCATION if none of the listed words has a location.  */

static location_t
smallest_type_quals_location (const location_t *locations,
			      const c_declspec_word *list)
{
  location_t best = UNKNOWN_LOCATION;

  for (const c_declspec_word *p = list; *p != cdw_number_of_elements; p++)
    {
      location_t candidate = locations[*p];
      if (candidate == UNKNOWN_LOCATION)
	continue;
      if (best == UNKNOWN_LOCATION || candidate < best)
	best = candidate;
    }

  return best;
}
/* Given declspecs and a declarator,
determine the name and type of the object declared
and construct a ..._DECL node for it.
(In one case we can return a ..._TYPE node instead.
For invalid input we sometimes return NULL_TREE.)
DECLSPECS is a c_declspecs structure for the declaration specifiers.
DECL_CONTEXT says which syntactic context this declaration is in:
NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
FUNCDEF for a function definition. Like NORMAL but a few different
error messages in each case. Return value may be zero meaning
this definition is too screwy to try to parse.
PARM for a parameter declaration (either within a function prototype
or before a function body). Make a PARM_DECL, or return void_type_node.
TYPENAME if for a typename (in a cast or sizeof).
Don't make a DECL node; just return the ..._TYPE node.
FIELD for a struct or union field; make a FIELD_DECL.
INITIALIZED is true if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
representing the width of the bit-field.
DECL_ATTRS points to the list of attributes that should be added to this
decl. Any nested attributes that belong on the decl itself will be
added to this list.
If EXPR is not NULL, any expressions that need to be evaluated as
part of evaluating variably modified types will be stored in *EXPR.
If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be
set to indicate whether operands in *EXPR can be used in constant
expressions.
DEPRECATED_STATE is a deprecated_states value indicating whether
deprecation warnings should be suppressed.
In the TYPENAME case, DECLARATOR is really an absolute declarator.
It may also be so in the PARM case, for a prototype where the
argument type is specified but not the name.
This function is where the complicated C meanings of `static'
and `extern' are interpreted. */
static tree
grokdeclarator (const struct c_declarator *declarator,
struct c_declspecs *declspecs,
enum decl_context decl_context, bool initialized, tree *width,
tree *decl_attrs, tree *expr, bool *expr_const_operands,
enum deprecated_states deprecated_state)
{
tree type = declspecs->type;
bool threadp = declspecs->thread_p;
enum c_storage_class storage_class = declspecs->storage_class;
int constp;
int restrictp;
int volatilep;
int atomicp;
int type_quals = TYPE_UNQUALIFIED;
tree name = NULL_TREE;
bool funcdef_flag = false;
bool funcdef_syntax = false;
bool size_varies = false;
tree decl_attr = declspecs->decl_attr;
int array_ptr_quals = TYPE_UNQUALIFIED;
tree array_ptr_attrs = NULL_TREE;
bool array_parm_static = false;
bool array_parm_vla_unspec_p = false;
tree returned_attrs = NULL_TREE;
tree decl_id_attrs = NULL_TREE;
bool bitfield = width != NULL;
tree element_type;
tree orig_qual_type = NULL;
size_t orig_qual_indirect = 0;
struct c_arg_info *arg_info = 0;
addr_space_t as1, as2, address_space;
location_t loc = UNKNOWN_LOCATION;
tree expr_dummy;
bool expr_const_operands_dummy;
enum c_declarator_kind first_non_attr_kind;
unsigned int alignas_align = 0;
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
{
expr = &expr_dummy;
expr_dummy = NULL_TREE;
}
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
if (declspecs->expr)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr,
declspecs->expr);
else
*expr = declspecs->expr;
}
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
funcdef_flag = true, decl_context = NORMAL;
/* Look inside a declarator for the name being declared
and get it as an IDENTIFIER_NODE, for an error message. */
{
const struct c_declarator *decl = declarator;
first_non_attr_kind = cdk_attrs;
while (decl)
switch (decl->kind)
{
case cdk_array:
loc = decl->id_loc;
/* FALL THRU. */
case cdk_function:
case cdk_pointer:
funcdef_syntax = (decl->kind == cdk_function);
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = decl->declarator;
break;
case cdk_attrs:
decl = decl->declarator;
break;
case cdk_id:
loc = decl->id_loc;
if (decl->u.id.id)
name = decl->u.id.id;
decl_id_attrs = decl->u.id.attrs;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = 0;
break;
default:
gcc_unreachable ();
}
if (name == NULL_TREE)
{
gcc_assert (decl_context == PARM
|| decl_context == TYPENAME
|| (decl_context == FIELD
&& declarator->kind == cdk_id));
gcc_assert (!initialized);
}
}
/* A function definition's declarator must have the form of
a function declarator. */
if (funcdef_flag && !funcdef_syntax)
return NULL_TREE;
/* If this looks like a function definition, make it one,
even if it occurs where parms are expected.
Then store_parm_decls will reject it and not use it as a parm. */
if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag)
decl_context = PARM;
if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (declspecs->type, declspecs->decl_attr);
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope
&& variably_modified_type_p (type, NULL_TREE))
{
if (name)
error_at (loc, "variably modified %qE at file scope", name);
else
error_at (loc, "variably modified field at file scope");
type = integer_type_node;
}
size_varies = C_TYPE_VARIABLE_SIZE (type) != 0;
/* Diagnose defaulting to "int". */
if (declspecs->default_int_p && !in_system_header_at (input_location))
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
prefer the former warning since it is more explicit. */
if ((warn_implicit_int || warn_return_type > 0 || flag_isoc99)
&& funcdef_flag)
warn_about_return_type = 1;
else
{
if (name)
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in declaration "
"of %qE", name);
else
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
/* Adjust the type if a bit-field is being declared,
-funsigned-bitfields applied and the type is not explicitly
"signed". */
if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p
&& TREE_CODE (type) == INTEGER_TYPE)
type = unsigned_type_for (type);
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
something like `typedef const int CI; CI i' where the type of the
declaration contains the `const'. A third possibility is that
there is a type qualifier on the element type of a typedefed
array type, in which case we should extract that qualifier so
that c_apply_type_quals_to_decl receives the full list of
qualifiers to work with (C90 is not entirely clear about whether
duplicate qualifiers should be diagnosed in this case, but it
seems most appropriate to do so). */
element_type = strip_array_types (type);
constp = declspecs->const_p + TYPE_READONLY (element_type);
restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type);
volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type);
atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type);
as1 = declspecs->address_space;
as2 = TYPE_ADDR_SPACE (element_type);
address_space = ADDR_SPACE_GENERIC_P (as1)? as2 : as1;
if (constp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>");
if (restrictp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>");
if (volatilep > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>");
if (atomicp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>");
if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2)
error_at (loc, "conflicting named address spaces (%s vs %s)",
c_addr_space_name (as1), c_addr_space_name (as2));
if ((TREE_CODE (type) == ARRAY_TYPE
|| first_non_attr_kind == cdk_array)
&& TYPE_QUALS (element_type))
{
orig_qual_type = type;
type = TYPE_MAIN_VARIANT (type);
}
type_quals = ((constp ? TYPE_QUAL_CONST : 0)
| (restrictp ? TYPE_QUAL_RESTRICT : 0)
| (volatilep ? TYPE_QUAL_VOLATILE : 0)
| (atomicp ? TYPE_QUAL_ATOMIC : 0)
| ENCODE_QUAL_ADDR_SPACE (address_space));
if (type_quals != TYPE_QUALS (element_type))
orig_qual_type = NULL_TREE;
/* Applying the _Atomic qualifier to an array type (through the use
of typedefs or typeof) must be detected here. If the qualifier
is introduced later, any appearance of applying it to an array is
actually applying it to an element of that array. */
if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "%<_Atomic%>-qualified array type");
/* Warn about storage classes that are invalid for certain
kinds of declarations (parameters, typenames, etc.). */
if (funcdef_flag
&& (threadp
|| storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
pedwarn (loc,
(current_scope == file_scope) ? 0 : OPT_Wpedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
if (storage_class == csc_typedef)
error_at (loc, "function definition declared %<typedef%>");
if (threadp)
error_at (loc, "function definition declared %qs",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
if (storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef)
storage_class = csc_none;
}
else if (decl_context != NORMAL && (storage_class != csc_none || threadp))
{
if (decl_context == PARM && storage_class == csc_register)
;
else
{
switch (decl_context)
{
case FIELD:
if (name)
error_at (loc, "storage class specified for structure "
"field %qE", name);
else
error_at (loc, "storage class specified for structure field");
break;
case PARM:
if (name)
error_at (loc, "storage class specified for parameter %qE",
name);
else
error_at (loc, "storage class specified for unnamed parameter");
break;
default:
error_at (loc, "storage class specified for typename");
break;
}
storage_class = csc_none;
threadp = false;
}
}
else if (storage_class == csc_extern
&& initialized
&& !funcdef_flag)
{
/* 'extern' with initialization is invalid if not at file scope. */
if (current_scope == file_scope)
{
/* It is fine to have 'extern const' when compiling at C
and C++ intersection. */
if (!(warn_cxx_compat && constp))
warning_at (loc, 0, "%qE initialized and declared %<extern%>",
name);
}
else
error_at (loc, "%qE has both %<extern%> and initializer", name);
}
else if (current_scope == file_scope)
{
if (storage_class == csc_auto)
error_at (loc, "file-scope declaration of %qE specifies %<auto%>",
name);
if (pedantic && storage_class == csc_register)
pedwarn (input_location, OPT_Wpedantic,
"file-scope declaration of %qE specifies %<register%>", name);
}
else
{
if (storage_class == csc_extern && funcdef_flag)
error_at (loc, "nested function %qE declared %<extern%>", name);
else if (threadp && storage_class == csc_none)
{
error_at (loc, "function-scope %qE implicitly auto and declared "
"%qs", name,
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
}
}
/* Now figure out the structure of the declarator proper.
Descend through it, creating more complex types, until we reach
the declared identifier (or NULL_TREE, in an absolute declarator).
At each stage we maintain an unqualified version of the type
together with any qualifiers that should be applied to it with
c_build_qualified_type; this way, array types including
multidimensional array types are first built up in unqualified
form and then the qualified form is created with
TYPE_MAIN_VARIANT pointing to the unqualified form. */
while (declarator && declarator->kind != cdk_id)
{
if (type == error_mark_node)
{
declarator = declarator->declarator;
continue;
}
/* Each level of DECLARATOR is either a cdk_array (for ...[..]),
a cdk_pointer (for *...),
a cdk_function (for ...(...)),
a cdk_attrs (for nested attributes),
or a cdk_id (for the name being declared
or the place in an absolute declarator
where the name was omitted).
For the last case, we have just exited the loop.
At this point, TYPE is the type of elements of an array,
or for a function to return, or for a pointer to point to.
After this sequence of ifs, TYPE is the type of the
array or function or pointer, and DECLARATOR has had its
outermost layer removed. */
if (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static)
{
/* Only the innermost declarator (making a parameter be of
array type which is converted to pointer type)
may have static or type qualifiers. */
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
switch (declarator->kind)
{
case cdk_attrs:
{
/* A declarator with embedded attributes. */
tree attrs = declarator->u.attrs;
const struct c_declarator *inner_decl;
int attr_flags = 0;
declarator = declarator->declarator;
/* Standard attribute syntax precisely defines what entity
an attribute in each position appertains to, so only
apply laxity about positioning to GNU attribute syntax.
Standard attributes applied to a function or array
declarator apply exactly to that type; standard
attributes applied to the identifier apply to the
declaration rather than to the type, and are specified
using a cdk_id declarator rather than using
cdk_attrs. */
inner_decl = declarator;
while (inner_decl->kind == cdk_attrs)
inner_decl = inner_decl->declarator;
if (!cxx11_attribute_p (attrs))
{
if (inner_decl->kind == cdk_id)
attr_flags |= (int) ATTR_FLAG_DECL_NEXT;
else if (inner_decl->kind == cdk_function)
attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT;
else if (inner_decl->kind == cdk_array)
attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT;
}
attrs = c_warn_type_attributes (attrs);
returned_attrs = decl_attributes (&type,
chainon (returned_attrs, attrs),
attr_flags);
break;
}
case cdk_array:
{
tree itype = NULL_TREE;
tree size = declarator->u.array.dimen;
/* The index is a signed object `sizetype' bits wide. */
tree index_type = c_common_signed_type (sizetype);
array_ptr_quals = declarator->u.array.quals;
array_ptr_attrs = declarator->u.array.attrs;
array_parm_static = declarator->u.array.static_p;
array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p;
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
if (VOID_TYPE_P (type))
{
if (name)
error_at (loc, "declaration of %qE as array of voids", name);
else
error_at (loc, "declaration of type name as array of voids");
type = error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "declaration of %qE as array of functions",
name);
else
error_at (loc, "declaration of type name as array of "
"functions");
type = error_mark_node;
}
if (pedantic && !in_system_header_at (input_location)
&& flexible_array_type_p (type))
pedwarn (loc, OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (size == error_mark_node)
type = error_mark_node;
if (type == error_mark_node)
continue;
if (!verify_type_context (loc, TCTX_ARRAY_ELEMENT, type))
{
type = error_mark_node;
continue;
}
/* If size was specified, set ITYPE to a range-type for
that size. Otherwise, ITYPE remains null. finish_decl
may figure it out from an initial value. */
if (size)
{
bool size_maybe_const = true;
bool size_int_const = (TREE_CODE (size) == INTEGER_CST
&& !TREE_OVERFLOW (size));
bool this_size_varies = false;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has non-integer type",
name);
else
error_at (loc,
"size of unnamed array has non-integer type");
size = integer_one_node;
size_int_const = true;
}
/* This can happen with enum forward declaration. */
else if (!COMPLETE_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has incomplete type",
name);
else
error_at (loc, "size of unnamed array has incomplete "
"type");
size = integer_one_node;
size_int_const = true;
}
size = c_fully_fold (size, false, &size_maybe_const);
if (pedantic && size_maybe_const && integer_zerop (size))
{
if (name)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array %qE", name);
else
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array");
}
if (TREE_CODE (size) == INTEGER_CST && size_maybe_const)
{
constant_expression_warning (size);
if (tree_int_cst_sgn (size) < 0)
{
if (name)
error_at (loc, "size of array %qE is negative", name);
else
error_at (loc, "size of unnamed array is negative");
size = integer_one_node;
size_int_const = true;
}
/* Handle a size folded to an integer constant but
not an integer constant expression. */
if (!size_int_const)
{
/* If this is a file scope declaration of an
ordinary identifier, this is invalid code;
diagnosing it here and not subsequently
treating the type as variable-length avoids
more confusing diagnostics later. */
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
pedwarn (input_location, 0,
"variably modified %qE at file scope",
name);
else
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
}
}
else if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
{
error_at (loc, "variably modified %qE at file scope", name);
size = integer_one_node;
}
else
{
/* Make sure the array size remains visibly
nonconstant even if it is (eg) a const variable
with known value. */
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (sanitize_flags_p (SANITIZE_VLA)
&& current_function_decl != NULL_TREE
&& decl_context == NORMAL)
{
/* Evaluate the array size only once. */
size = save_expr (size);
size = c_fully_fold (size, false, NULL);
size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size),
ubsan_instrument_vla (loc, size),
size);
}
}
if (integer_zerop (size) && !this_size_varies)
{
/* A zero-length array cannot be represented with
an unsigned index type, which is what we'll
get with build_index_type. Create an
open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
{
/* Arrange for the SAVE_EXPR on the inside of the
MINUS_EXPR, which allows the -1 to get folded
with the +1 that happens when building TYPE_SIZE. */
if (size_varies)
size = save_expr (size);
if (this_size_varies && TREE_CODE (size) == INTEGER_CST)
size = build2 (COMPOUND_EXPR, TREE_TYPE (size),
integer_zero_node, size);
/* Compute the maximum valid index, that is, size
- 1. Do the calculation in index_type, so that
if it is a variable the computations will be
done in the proper mode. */
itype = fold_build2_loc (loc, MINUS_EXPR, index_type,
convert (index_type, size),
convert (index_type,
size_one_node));
/* The above overflows when size does not fit
in index_type.
??? While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), handling
this case seems like an unnecessary complication. */
if (TREE_CODE (size) == INTEGER_CST
&& !int_fits_type_p (size, index_type))
{
if (name)
error_at (loc, "size of array %qE is too large",
name);
else
error_at (loc, "size of unnamed array is too large");
type = error_mark_node;
continue;
}
itype = build_index_type (itype);
}
if (this_size_varies)
{
if (TREE_SIDE_EFFECTS (size))
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (size),
*expr, size);
else
*expr = size;
}
*expr_const_operands &= size_maybe_const;
}
}
else if (decl_context == FIELD)
{
bool flexible_array_member = false;
if (array_parm_vla_unspec_p)
/* Field names can in fact have function prototype
scope so [*] is disallowed here through making
the field variably modified, not through being
something other than a declaration with function
prototype scope. */
size_varies = true;
else
{
const struct c_declarator *t = declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
flexible_array_member = (t->kind == cdk_id);
}
if (flexible_array_member
&& !in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
/* ISO C99 Flexible array members are effectively
identical to GCC's zero-length array extension. */
if (flexible_array_member || array_parm_vla_unspec_p)
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
else if (decl_context == PARM)
{
if (array_parm_vla_unspec_p)
{
itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
size_varies = true;
}
}
else if (decl_context == TYPENAME)
{
if (array_parm_vla_unspec_p)
{
/* C99 6.7.5.2p4 */
warning (0, "%<[*]%> not in a declaration");
/* We use this to avoid messing up with incomplete
array types of the same type, that would
otherwise be modified below. */
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
size_varies = true;
}
}
/* Complain about arrays of incomplete types. */
if (!COMPLETE_TYPE_P (type))
{
error_at (loc, "array type has incomplete element type %qT",
type);
/* See if we can be more helpful. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
inform (loc, "declaration of %qE as multidimensional "
"array must have bounds for all dimensions "
"except the first", name);
else
inform (loc, "declaration of multidimensional array "
"must have bounds for all dimensions except "
"the first");
}
type = error_mark_node;
}
else
/* When itype is NULL, a shared incomplete array type is
returned for all array of a given type. Elsewhere we
make sure we don't complete that type before copying
it, but here we want to make sure we don't ever
modify the shared type, so we gcc_assert (itype)
below. */
{
addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type))
type = build_qualified_type (type,
ENCODE_QUAL_ADDR_SPACE (as));
type = build_array_type (type, itype);
}
if (type != error_mark_node)
{
if (size_varies)
{
/* It is ok to modify type here even if itype is
NULL: if size_varies, we're in a
multi-dimensional array and the inner type has
variable size, so the enclosing shared array type
must too. */
if (size && TREE_CODE (size) == INTEGER_CST)
type
= build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
C_TYPE_VARIABLE_SIZE (type) = 1;
}
/* The GCC extension for zero-length arrays differs from
ISO flexible array members in that sizeof yields
zero. */
if (size && integer_zerop (size))
{
gcc_assert (itype);
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (array_parm_vla_unspec_p)
{
gcc_assert (itype);
/* The type is complete. C99 6.7.5.2p4 */
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (!valid_array_size_p (loc, type, name))
type = error_mark_node;
}
if (decl_context != PARM
&& (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static))
{
error_at (loc, "static or type qualifiers in non-parameter "
"array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
orig_qual_indirect++;
break;
}
case cdk_function:
{
/* Say it's a definition only for the declarator closest
to the identifier, apart possibly from some
attributes. */
bool really_funcdef = false;
tree arg_types;
orig_qual_type = NULL_TREE;
if (funcdef_flag)
{
const struct c_declarator *t = declarator->declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
really_funcdef = (t->kind == cdk_id);
}
/* Declaring a function type. Make sure we have a valid
type for the function to return. */
if (type == error_mark_node)
continue;
size_varies = false;
/* Warn about some types functions can't return. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning a "
"function", name);
else
error_at (loc, "type name declared as function "
"returning a function");
type = integer_type_node;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning an array",
name);
else
error_at (loc, "type name declared as function returning "
"an array");
type = integer_type_node;
}
/* Construct the function type and go to the next
inner layer of declarator. */
arg_info = declarator->u.arg_info;
arg_types = grokparms (arg_info, really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
{
const enum c_declspec_word ignored_quals_list[] =
{
cdw_const, cdw_volatile, cdw_restrict, cdw_address_space,
cdw_atomic, cdw_number_of_elements
};
location_t specs_loc
= smallest_type_quals_location (declspecs->locations,
ignored_quals_list);
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = declspecs->locations[cdw_typedef];
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = loc;
/* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
function definitions in ISO C; GCC used to use
them for noreturn functions. The resolution of C11
DR#423 means qualifiers (other than _Atomic) are
actually removed from the return type when
determining the function type. */
int quals_used = type_quals;
if (flag_isoc11)
quals_used &= TYPE_QUAL_ATOMIC;
if (quals_used && VOID_TYPE_P (type) && really_funcdef)
pedwarn (specs_loc, 0,
"function definition has qualified void "
"return type");
else
warning_at (specs_loc, OPT_Wignored_qualifiers,
"type qualifiers ignored on function "
"return type");
/* Ensure an error for restrict on invalid types; the
DR#423 resolution is not entirely clear about
this. */
if (flag_isoc11
&& (type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
error_at (loc, "invalid use of %<restrict%>");
type = c_build_qualified_type (type, quals_used);
}
type_quals = TYPE_UNQUALIFIED;
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
c_arg_tag *tag;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
}
case cdk_pointer:
{
/* Merge any constancy or volatility into the target type
for the pointer. */
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
orig_qual_type = NULL_TREE;
size_varies = false;
/* When the pointed-to type involves components of variable size,
care must be taken to ensure that the size evaluation code is
emitted early enough to dominate all the possible later uses
and late enough for the variables on which it depends to have
been assigned.
This is expected to happen automatically when the pointed-to
type has a name/declaration of its own, but special attention
is required if the type is anonymous.
We attach an artificial TYPE_DECL to such pointed-to type
and arrange for it to be included in a DECL_EXPR. This
forces the sizes evaluation at a safe point and ensures it
is not deferred until e.g. within a deeper conditional context.
PARM contexts have no enclosing statement list that
can hold the DECL_EXPR, so we need to use a BIND_EXPR
instead, and add it to the list of expressions that
need to be evaluated.
TYPENAME contexts do have an enclosing statement list,
but it would be incorrect to use it, as the size should
only be evaluated if the containing expression is
evaluated. We might also be in the middle of an
expression with side effects on the pointed-to type size
"arguments" prior to the pointer declaration point and
the fake TYPE_DECL in the enclosing context would force
the size evaluation prior to the side effects. We therefore
use BIND_EXPRs in TYPENAME contexts too. */
if (!TYPE_NAME (type)
&& variably_modified_type_p (type, NULL_TREE))
{
tree bind = NULL_TREE;
if (decl_context == TYPENAME || decl_context == PARM)
{
bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = push_stmt_list ();
push_scope ();
}
tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type);
DECL_ARTIFICIAL (decl) = 1;
pushdecl (decl);
finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE);
TYPE_NAME (type) = decl;
if (bind)
{
pop_scope ();
BIND_EXPR_BODY (bind)
= pop_stmt_list (BIND_EXPR_BODY (bind));
if (*expr)
*expr = build2 (COMPOUND_EXPR, void_type_node, *expr,
bind);
else
*expr = bind;
}
}
type = c_build_pointer_type (type);
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
declarator = declarator->declarator;
break;
}
default:
gcc_unreachable ();
}
}
*decl_attrs = chainon (returned_attrs, *decl_attrs);
*decl_attrs = chainon (decl_id_attrs, *decl_attrs);
/* Now TYPE has the actual type, apart from any qualifiers in
TYPE_QUALS. */
/* Warn about address space used for things other than static memory or
pointers. */
address_space = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (address_space))
{
if (decl_context == NORMAL)
{
switch (storage_class)
{
case csc_auto:
error ("%qs combined with %<auto%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_register:
error ("%qs combined with %<register%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_none:
if (current_function_scope)
{
error ("%qs specified for auto variable %qE",
c_addr_space_name (address_space), name);
break;
}
break;
case csc_static:
case csc_extern:
case csc_typedef:
break;
default:
gcc_unreachable ();
}
}
else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE)
{
if (name)
error ("%qs specified for parameter %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for unnamed parameter",
c_addr_space_name (address_space));
}
else if (decl_context == FIELD)
{
if (name)
error ("%qs specified for structure field %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for structure field",
c_addr_space_name (address_space));
}
}
/* Check the type and width of a bit-field. */
if (bitfield)
{
check_bitfield_type_and_width (loc, &type, width, name);
/* C11 makes it implementation-defined (6.7.2.1#5) whether
atomic types are permitted for bit-fields; we have no code to
make bit-field accesses atomic, so disallow them. */
if (type_quals & TYPE_QUAL_ATOMIC)
{
if (name)
error_at (loc, "bit-field %qE has atomic type", name);
else
error_at (loc, "bit-field has atomic type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
}
/* Reject invalid uses of _Alignas. */
if (declspecs->alignas_p)
{
if (storage_class == csc_typedef)
error_at (loc, "alignment specified for typedef %qE", name);
else if (storage_class == csc_register)
error_at (loc, "alignment specified for %<register%> object %qE",
name);
else if (decl_context == PARM)
{
if (name)
error_at (loc, "alignment specified for parameter %qE", name);
else
error_at (loc, "alignment specified for unnamed parameter");
}
else if (bitfield)
{
if (name)
error_at (loc, "alignment specified for bit-field %qE", name);
else
error_at (loc, "alignment specified for unnamed bit-field");
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "alignment specified for function %qE", name);
else if (declspecs->align_log != -1 && TYPE_P (type))
{
alignas_align = 1U << declspecs->align_log;
if (alignas_align < min_align_of_type (type))
{
if (name)
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of %qE", name);
else
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of unnamed field");
alignas_align = 0;
}
}
}
/* If this is declaring a typedef name, return a TYPE_DECL. */
if (storage_class == csc_typedef)
{
tree decl;
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
TYPE_DECL, declarator->u.id.id, type);
if (declspecs->explicit_signed_p)
C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl);
if (warn_cxx_compat && declarator->u.id.id != NULL_TREE)
{
struct c_binding *b = I_TAG_BINDING (declarator->u.id.id);
if (b != NULL
&& b->decl != NULL_TREE
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type))
{
auto_diagnostic_group d;
if (warning_at (declarator->id_loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"), decl)
&& b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
return decl;
}
/* If this is a type name (such as, in a cast or sizeof),
compute the type and return it now. */
if (decl_context == TYPENAME)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids const or volatile function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
return type;
}
if (pedantic && decl_context == FIELD
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.2.1p8 */
pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot "
"have a variably modified type");
}
/* Aside from typedefs and type names (handled above),
`void' at top level (not within pointer)
is allowed only in public variables.
We don't complain about parms either, but that is because
a better error message can be made later. */
if (VOID_TYPE_P (type) && decl_context != PARM
&& !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
&& (storage_class == csc_extern
|| (current_scope == file_scope
&& !(storage_class == csc_static
|| storage_class == csc_register)))))
{
error_at (loc, "variable or field %qE declared void", name);
type = integer_type_node;
}
/* Now create the decl, which may be a VAR_DECL, a PARM_DECL
or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
{
tree decl;
if (decl_context == PARM)
{
tree promoted_type;
bool array_parameter_p = false;
/* A parameter declared as an array of T is really a pointer to T.
One declared as a function is really a pointer to a function. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
/* Transfer const-ness of array into that of type pointed to. */
type = TREE_TYPE (type);
if (orig_qual_type != NULL_TREE)
{
if (orig_qual_indirect == 0)
orig_qual_type = TREE_TYPE (orig_qual_type);
else
orig_qual_indirect--;
}
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
type = c_build_pointer_type (type);
type_quals = array_ptr_quals;
if (type_quals)
type = c_build_qualified_type (type, type_quals);
/* We don't yet implement attributes in this context. */
if (array_ptr_attrs != NULL_TREE)
warning_at (loc, OPT_Wattributes,
"attributes in parameter array declarator ignored");
size_varies = false;
array_parameter_p = true;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = TYPE_UNQUALIFIED;
}
else if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
PARM_DECL, declarator->u.id.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
C_ARRAY_PARAMETER (decl) = array_parameter_p;
/* Compute the type actually passed in the parmlist,
for the case where there is no prototype.
(For example, shorts and chars are passed as ints.)
When there is a prototype, this is overridden later. */
if (type == error_mark_node)
promoted_type = type;
else
promoted_type = c_type_promotes_to (type);
DECL_ARG_TYPE (decl) = promoted_type;
if (declspecs->inline_p)
pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl);
}
else if (decl_context == FIELD)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
/* Structure field. It may not be a function. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "field %qE declared as a function", name);
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
&& !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
if (name)
error_at (loc, "field %qE has incomplete type", name);
else
error_at (loc, "unnamed field has incomplete type");
type = error_mark_node;
}
else if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
{
/* We have a flexible array member through a typedef.
Set suitable range. Whether this is a correct position
for a flexible array member will be determined elsewhere. */
if (!in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node,
NULL_TREE);
if (orig_qual_indirect == 0)
orig_qual_type = NULL_TREE;
}
if (type != error_mark_node
&& !verify_type_context (loc, TCTX_FIELD, type))
type = error_mark_node;
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
FIELD_DECL, declarator->u.id.id, type);
DECL_NONADDRESSABLE_P (decl) = bitfield;
if (bitfield && !declarator->u.id.id)
{
TREE_NO_WARNING (decl) = 1;
DECL_PADDING_P (decl) = 1;
}
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (storage_class == csc_register || threadp)
{
error_at (loc, "invalid storage class for function %qE", name);
}
else if (current_scope != file_scope)
{
/* Function declaration not at file scope. Storage
classes other than `extern' are not allowed, C99
6.7.1p5, and `extern' makes no difference. However,
GCC allows 'auto', perhaps with 'inline', to support
nested functions. */
if (storage_class == csc_auto)
pedwarn (loc, OPT_Wpedantic,
"invalid storage class for function %qE", name);
else if (storage_class == csc_static)
{
error_at (loc, "invalid storage class for function %qE", name);
if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return NULL_TREE;
}
}
decl = build_decl (declarator->id_loc,
FUNCTION_DECL, declarator->u.id.id, type);
decl = build_decl_attribute_variant (decl, decl_attr);
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
/* Every function declaration is an external reference
(DECL_EXTERNAL) except for those which are not at file
scope and are explicitly declared "auto". This is
forbidden by standard C (C99 6.7.1p5) and is interpreted by
GCC to signify a forward declaration of a nested function. */
if (storage_class == csc_auto && current_scope != file_scope)
DECL_EXTERNAL (decl) = 0;
/* In C99, a function which is declared 'inline' with 'extern'
is not an external reference (which is confusing). It
means that the later definition of the function must be output
in this file, C99 6.7.4p6. In GNU C89, a function declared
'extern inline' is an external reference. */
else if (declspecs->inline_p && storage_class != csc_static)
DECL_EXTERNAL (decl) = ((storage_class == csc_extern)
== flag_gnu89_inline);
else
DECL_EXTERNAL (decl) = !initialized;
/* Record absence of global scope for `static' or `auto'. */
TREE_PUBLIC (decl)
= !(storage_class == csc_static || storage_class == csc_auto);
/* For a function definition, record the argument information
block where store_parm_decls will look for it. */
if (funcdef_flag)
current_function_arg_info = arg_info;
if (declspecs->default_int_p)
C_FUNCTION_IMPLICIT_INT (decl) = 1;
/* Record presence of `inline' and `_Noreturn', if it is
reasonable. */
if (flag_hosted && MAIN_NAME_P (declarator->u.id.id))
{
if (declspecs->inline_p)
pedwarn (loc, 0, "cannot inline function %<main%>");
if (declspecs->noreturn_p)
pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>");
}
else
{
if (declspecs->inline_p)
/* Record that the function is declared `inline'. */
DECL_DECLARED_INLINE_P (decl) = 1;
if (declspecs->noreturn_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %<_Noreturn%>");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %<_Noreturn%>");
TREE_THIS_VOLATILE (decl) = 1;
}
}
}
else
{
/* It's a variable. */
/* An uninitialized decl with `extern' is a reference. */
int extern_ref = !initialized && storage_class == csc_extern;
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
/* C99 6.2.2p7: It is invalid (compile-time undefined
behavior) to create an 'extern' declaration for a
variable if there is a global declaration that is
'static' and the global declaration is not visible.
(If the static declaration _is_ currently visible,
the 'extern' declaration is taken to refer to that decl.) */
if (extern_ref && current_scope != file_scope)
{
tree global_decl = identifier_global_value (declarator->u.id.id);
tree visible_decl = lookup_name (declarator->u.id.id);
if (global_decl
&& global_decl != visible_decl
&& VAR_P (global_decl)
&& !TREE_PUBLIC (global_decl))
error_at (loc, "variable previously declared %<static%> "
"redeclared %<extern%>");
}
decl = build_decl (declarator->id_loc,
VAR_DECL, declarator->u.id.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl);
/* At file scope, an initialized extern declaration may follow
a static declaration. In that case, DECL_EXTERNAL will be
reset later in start_decl. */
DECL_EXTERNAL (decl) = (storage_class == csc_extern);
/* At file scope, the presence of a `static' or `register' storage
class specifier, or the absence of all storage class specifiers
makes this declaration a definition (perhaps tentative). Also,
the absence of `static' makes it public. */
if (current_scope == file_scope)
{
TREE_PUBLIC (decl) = storage_class != csc_static;
TREE_STATIC (decl) = !extern_ref;
}
/* Not at file scope, only `static' makes a static definition. */
else
{
TREE_STATIC (decl) = (storage_class == csc_static);
TREE_PUBLIC (decl) = extern_ref;
}
if (threadp)
set_decl_tls_model (decl, decl_default_tls_model (decl));
}
if ((storage_class == csc_extern
|| (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE
&& !funcdef_flag))
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.5.2p2 */
if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "non-nested function with variably modified type");
else
error_at (loc, "object with variably modified type must have "
"no linkage");
}
/* For nested functions disqualify ones taking VLAs by value
from inlining since the middle-end cannot deal with this.
??? We should arrange for those to be passed by reference
with emitting the copy on the caller side in the frontend. */
if (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE)
for (tree al = TYPE_ARG_TYPES (type); al; al = TREE_CHAIN (al))
{
tree arg = TREE_VALUE (al);
if (arg != error_mark_node
&& C_TYPE_VARIABLE_SIZE (arg))
{
DECL_UNINLINABLE (decl) = 1;
break;
}
}
/* Record `register' declaration for warnings on &
and in case doing stupid register allocation. */
if (storage_class == csc_register)
{
C_DECL_REGISTER (decl) = 1;
DECL_REGISTER (decl) = 1;
}
/* Record constancy and volatility. */
c_apply_type_quals_to_decl (type_quals, decl);
/* Apply _Alignas specifiers. */
if (alignas_align)
{
SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
DECL_USER_ALIGN (decl) = 1;
}
/* If a type has volatile components, it should be stored in memory.
Otherwise, the fact that those components are volatile
will be ignored, and would even crash the compiler.
Of course, this only makes sense on VAR,PARM, and RESULT decl's. */
if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))
&& (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == RESULT_DECL))
{
/* It is not an error for a structure with volatile fields to
be declared register, but reset DECL_REGISTER since it
cannot actually go in a register. */
int was_reg = C_DECL_REGISTER (decl);
C_DECL_REGISTER (decl) = 0;
DECL_REGISTER (decl) = 0;
c_mark_addressable (decl);
C_DECL_REGISTER (decl) = was_reg;
}
/* This is the earliest point at which we might know the assembler
name of a variable. Thus, if it's known before this, die horribly. */
gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl)
|| !DECL_ASSEMBLER_NAME_SET_P (decl));
if (warn_cxx_compat
&& VAR_P (decl)
&& TREE_PUBLIC (decl)
&& TREE_STATIC (decl)
&& (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
("non-local variable %qD with anonymous type is "
"questionable in C++"),
decl);
return decl;
}
}
/* Decode the parameter-list info for a function type or function definition.
The argument is the value returned by `get_parm_info' (or made in c-parse.c
if there is an identifier list instead of a parameter decl list).
These two functions are separate because when a function returns
or receives functions then each is called multiple times but the order
of calls is different. The last call to `grokparms' is always the one
that contains the formal parameter names of a function definition.
Return a list of arg types to use in the FUNCTION_TYPE for this function.
FUNCDEF_FLAG is true for a function definition, false for
a mere declaration. A nonempty identifier-list gets an error message
when FUNCDEF_FLAG is false. */
static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
  tree arg_types = arg_info->types;

  /* "[*]" (VLA of unspecified size) is only meaningful in function
     prototype scope; a definition's parameter list is not prototype
     scope, so reject it there.  */
  if (funcdef_flag && arg_info->had_vla_unspec)
    {
      /* A function definition isn't function prototype scope C99 6.2.1p4.  */
      /* C99 6.7.5.2p4 */
      error ("%<[*]%> not allowed in other than function prototype scope");
    }

  /* An empty parameter list on a declaration (outside a system header)
     is an unprototyped declaration; warn under -Wstrict-prototypes.  */
  if (arg_types == NULL_TREE && !funcdef_flag
      && !in_system_header_at (input_location))
    warning (OPT_Wstrict_prototypes,
	     "function declaration isn%'t a prototype");

  if (arg_types == error_mark_node)
    /* Don't set TYPE_ARG_TYPES in this case.  */
    return NULL_TREE;

  else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
    {
      /* An old-style (K&R) identifier list such as "foo (a, b)".
	 Names without types are only valid on a definition; on a
	 declaration they are discarded with a pedwarn.  Either way
	 no prototype types are recorded.  */
      if (!funcdef_flag)
	{
	  pedwarn (input_location, 0, "parameter names (without types) in "
		   "function declaration");
	  arg_info->parms = NULL_TREE;
	}
      else
	arg_info->parms = arg_info->types;

      arg_info->types = NULL_TREE;
      return NULL_TREE;
    }
  else
    {
      tree parm, type, typelt;
      unsigned int parmno;

      /* In C2X, convert () in a function definition to (void).  */
      if (flag_isoc2x
	  && funcdef_flag
	  && !arg_types
	  && !arg_info->parms)
	arg_types = arg_info->types = void_list_node;

      /* If there is a parameter of incomplete type in a definition,
	 this is an error.  In a declaration this is valid, and a
	 struct or union type may be completed later, before any calls
	 or definition of the function.  In the case where the tag was
	 first declared within the parameter list, a warning has
	 already been given.  If a parameter has void type, then
	 however the function cannot be defined or called, so
	 warn.  */

      for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
	   parm;
	   parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
	{
	  type = TREE_VALUE (typelt);
	  if (type == error_mark_node)
	    continue;

	  if (!COMPLETE_TYPE_P (type))
	    {
	      if (funcdef_flag)
		{
		  /* In a definition an incomplete parameter type is a
		     hard error; poison both the type list entry and the
		     decl, and drop the prototype entirely.  */
		  if (DECL_NAME (parm))
		    error_at (input_location,
			      "parameter %u (%q+D) has incomplete type",
			      parmno, parm);
		  else
		    error_at (DECL_SOURCE_LOCATION (parm),
			      "parameter %u has incomplete type",
			      parmno);

		  TREE_VALUE (typelt) = error_mark_node;
		  TREE_TYPE (parm) = error_mark_node;
		  arg_types = NULL_TREE;
		}
	      else if (VOID_TYPE_P (type))
		{
		  /* A named void parameter in a declaration: the function
		     can never be defined or called, so only warn.  */
		  if (DECL_NAME (parm))
		    warning_at (input_location, 0,
				"parameter %u (%q+D) has void type",
				parmno, parm);
		  else
		    warning_at (DECL_SOURCE_LOCATION (parm), 0,
				"parameter %u has void type",
				parmno);
		}
	    }

	  if (DECL_NAME (parm) && TREE_USED (parm))
	    warn_if_shadowing (parm);
	}
      return arg_types;
    }
}
/* Allocate and initialize a c_arg_info structure from the parser's
obstack. */
struct c_arg_info *
build_arg_info (void)
{
  struct c_arg_info *info = XOBNEW (&parser_obstack, struct c_arg_info);

  /* Start from a fully cleared state; get_parm_info fills this in.  */
  info->parms = NULL_TREE;
  info->types = NULL_TREE;
  info->others = NULL_TREE;
  info->tags = NULL;
  info->pending_sizes = NULL;
  info->had_vla_unspec = 0;

  return info;
}
/* Take apart the current scope and return a c_arg_info structure with
info on a parameter list just parsed.
This structure is later fed to 'grokparms' and 'store_parm_decls'.
ELLIPSIS being true means the argument list ended in '...' so don't
append a sentinel (void_list_node) to the end of the type-list.
EXPR is NULL or an expression that needs to be evaluated for the
side effects of array size expressions in the parameters. */
struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
  struct c_binding *b = current_scope->bindings;
  struct c_arg_info *arg_info = build_arg_info ();

  /* Accumulators for the four kinds of things found in the scope.  */
  tree parms = NULL_TREE;
  vec<c_arg_tag, va_gc> *tags = NULL;
  tree types = NULL_TREE;
  tree others = NULL_TREE;

  bool gave_void_only_once_err = false;

  arg_info->had_vla_unspec = current_scope->had_vla_unspec;

  /* The bindings in this scope must not get put into a block.
     We will take care of deleting the binding nodes.  */
  current_scope->bindings = 0;

  /* This function is only called if there was *something* on the
     parameter list.  */
  gcc_assert (b);

  /* A parameter list consisting solely of 'void' indicates that the
     function takes no arguments.  But if the 'void' is qualified
     (by 'const' or 'volatile'), or has a storage class specifier
     ('register'), then the behavior is undefined; issue an error.
     Typedefs for 'void' are OK (see DR#157).  */
  if (b->prev == 0			    /* one binding */
      && TREE_CODE (b->decl) == PARM_DECL   /* which is a parameter */
      && !DECL_NAME (b->decl)		    /* anonymous */
      && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
    {
      if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
	  || C_DECL_REGISTER (b->decl))
	error_at (b->locus, "%<void%> as only parameter may not be qualified");

      /* There cannot be an ellipsis.  */
      if (ellipsis)
	error_at (b->locus, "%<void%> must be the only parameter");

      arg_info->types = void_list_node;
      return arg_info;
    }

  /* Without "...", terminate the type list with void_list_node so the
     resulting FUNCTION_TYPE is a fixed-arity prototype.  */
  if (!ellipsis)
    types = void_list_node;

  /* Break up the bindings list into parms, tags, types, and others;
     apply sanity checks; purge the name-to-decl bindings.  */
  while (b)
    {
      tree decl = b->decl;
      tree type = TREE_TYPE (decl);
      c_arg_tag tag;
      const char *keyword;

      switch (TREE_CODE (decl))
	{
	case PARM_DECL:
	  /* Pop this parameter's name binding, restoring whatever it
	     shadowed.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }

	  /* Check for forward decls that never got their actual decl.  */
	  if (TREE_ASM_WRITTEN (decl))
	    error_at (b->locus,
		      "parameter %q+D has just a forward declaration", decl);
	  /* Check for (..., void, ...) and issue an error.  */
	  else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
	    {
	      if (!gave_void_only_once_err)
		{
		  error_at (b->locus, "%<void%> must be the only parameter");
		  gave_void_only_once_err = true;
		}
	    }
	  else
	    {
	      /* Valid parameter, add it to the list.  Note the lists
		 are built in reverse binding order, which restores the
		 original declaration order.  */
	      DECL_CHAIN (decl) = parms;
	      parms = decl;

	      /* Since there is a prototype, args are passed in their
		 declared types.  The back end may override this later.  */
	      DECL_ARG_TYPE (decl) = type;
	      types = tree_cons (0, type, types);
	    }
	  break;

	case ENUMERAL_TYPE: keyword = "enum"; goto tag;
	case UNION_TYPE:    keyword = "union"; goto tag;
	case RECORD_TYPE:   keyword = "struct"; goto tag;
	tag:
	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }

	  /* Warn about any struct, union or enum tags defined in a
	     parameter list.  The scope of such types is limited to
	     the parameter list, which is rarely if ever desirable
	     (it's impossible to call such a function with type-
	     correct arguments).  An anonymous union parm type is
	     meaningful as a GNU extension, so don't warn for that.  */
	  if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE)
	    {
	      if (b->id)
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "%<%s %E%> declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword, b->id);
	      else
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "anonymous %s declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword);
	    }

	  tag.id = b->id;
	  tag.type = decl;
	  vec_safe_push (tags, tag);
	  break;

	case FUNCTION_DECL:
	  /* FUNCTION_DECLs appear when there is an implicit function
	     declaration in the parameter list.  */
	  gcc_assert (b->nested || seen_error ());
	  goto set_shadowed;

	case CONST_DECL:
	case TYPE_DECL:
	  /* CONST_DECLs appear here when we have an embedded enum,
	     and TYPE_DECLs appear here when we have an embedded struct
	     or union.  No warnings for this - we already warned about the
	     type itself.  */

	  /* When we reinsert this decl in the function body, we need
	     to reconstruct whether it was marked as nested.  */
	  gcc_assert (!b->nested);
	  DECL_CHAIN (decl) = others;
	  others = decl;
	  /* fall through */

	case ERROR_MARK:
	set_shadowed:
	  /* error_mark_node appears here when we have an undeclared
	     variable.  Just throw it away.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  break;

	  /* Other things that might be encountered.  */
	case LABEL_DECL:
	case VAR_DECL:
	default:
	  gcc_unreachable ();
	}

      b = free_binding_and_advance (b);
    }

  arg_info->parms = parms;
  arg_info->tags = tags;
  arg_info->types = types;
  arg_info->others = others;
  arg_info->pending_sizes = expr;
  return arg_info;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference with location LOC if it is
not defined. HAVE_STD_ATTRS says whether any standard attributes
were present after the struct, union or enum keyword; ATTRS are the
standard attributes present there. Return a c_typespec structure
for the type specifier. */
struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name,
		 bool have_std_attrs, tree attrs)
{
  struct c_typespec ret;
  tree ref;
  location_t refloc;

  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;

  /* If a cross reference is requested, look up the type
     already defined for this tag and return it.  */
  ref = lookup_tag (code, name, false, &refloc);
  /* If this is the right type of tag, return what we found.
     (This reference will be shadowed by shadow_tag later if appropriate.)
     If this is the wrong type of tag, do not return it.  If it was the
     wrong type in the same scope, we will have had an error
     message already; if in a different scope and declaring
     a name, pending_xref_error will give an error message; but if in a
     different scope and not declaring a name, this tag should
     shadow the previous declaration of a different type of tag, and
     this would not work properly if we return the reference found.
     (For example, with "struct foo" in an outer scope, "union foo;"
     must shadow that tag with a new one of union type.)  */
  ret.kind = (ref
	      ? (have_std_attrs ? ctsk_tagref_attrs : ctsk_tagref)
	      : (have_std_attrs ? ctsk_tagfirstref_attrs : ctsk_tagfirstref));
  if (ref && TREE_CODE (ref) == code)
    {
      decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE);
      /* C++ does not make a type defined inside a struct/union visible
	 outside it; warn under -Wc++-compat when such a type is
	 referenced.  */
      if (C_TYPE_DEFINED_IN_STRUCT (ref)
	  && loc != UNKNOWN_LOCATION
	  && warn_cxx_compat)
	{
	  switch (code)
	    {
	    case ENUMERAL_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("enum type defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "enum type defined here");
	      break;
	    case RECORD_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("struct defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "struct defined here");
	      break;
	    case UNION_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("union defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "union defined here");
	      break;
	    default:
	      gcc_unreachable();
	    }
	}

      ret.spec = ref;
      return ret;
    }

  /* If no such tag is yet defined, create a forward-reference node
     and record it as the "definition".
     When a real declaration of this type is found,
     the forward-reference will be altered into a real type.  */

  ref = make_node (code);
  if (code == ENUMERAL_TYPE)
    {
      /* Give the type a default layout like unsigned int
	 to avoid crashing if it does not get defined.  */
      SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node));
      SET_TYPE_ALIGN (ref, TYPE_ALIGN (unsigned_type_node));
      TYPE_USER_ALIGN (ref) = 0;
      TYPE_UNSIGNED (ref) = 1;
      TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
      TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
      TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
    }

  pushtag (loc, name, ref);
  decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE);

  ret.spec = ref;
  return ret;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference if it is not defined.
Return a tree for the type. */
tree
xref_tag (enum tree_code code, tree name)
{
  /* Delegate to parser_xref_tag at the current input location, with no
     standard attributes; only the type itself is needed here.  */
  struct c_typespec ts
    = parser_xref_tag (input_location, code, name, false, NULL_TREE);
  return ts.spec;
}
/* Make sure that the tag NAME is defined *in the current scope*
at least as a forward reference.
LOC is the location of the struct's definition.
CODE says which kind of tag NAME ought to be.
This stores the current value of the file static STRUCT_PARSE_INFO
in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
new c_struct_parse_info structure. The old value of
STRUCT_PARSE_INFO is restored in finish_struct. */
tree
start_struct (location_t loc, enum tree_code code, tree name,
	      class c_struct_parse_info **enclosing_struct_parse_info)
{
  /* If there is already a tag defined at this scope
     (as a forward reference), just return it.  */

  tree ref = NULL_TREE;
  location_t refloc = UNKNOWN_LOCATION;

  if (name != NULL_TREE)
    ref = lookup_tag (code, name, true, &refloc);
  if (ref && TREE_CODE (ref) == code)
    {
      if (TYPE_STUB_DECL (ref))
	refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref));

      /* A non-null TYPE_SIZE means the tag was already fully defined.  */
      if (TYPE_SIZE (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "redefinition of %<struct %E%>", name);
	  if (refloc != UNKNOWN_LOCATION)
	    inform (refloc, "originally defined here");
	  /* Don't create structures using a name already in use.  */
	  ref = NULL_TREE;
	}
      else if (C_TYPE_BEING_DEFINED (ref))
	{
	  /* The tag's definition is still open, i.e. the new definition
	     is nested inside the old one.  */
	  if (code == UNION_TYPE)
	    error_at (loc, "nested redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "nested redefinition of %<struct %E%>", name);
	  /* Don't bother to report "originally defined here" for a
	     nested redefinition; the original definition should be
	     obvious.  */
	  /* Don't create structures that contain themselves.  */
	  ref = NULL_TREE;
	}
    }

  /* Otherwise create a forward-reference just so the tag is in scope.  */

  if (ref == NULL_TREE || TREE_CODE (ref) != code)
    {
      ref = make_node (code);
      pushtag (loc, name, ref);
    }

  C_TYPE_BEING_DEFINED (ref) = 1;
  /* Apply -fpack-struct to every variant of the type.  */
  for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v))
    TYPE_PACKED (v) = flag_pack_struct;

  *enclosing_struct_parse_info = struct_parse_info;
  struct_parse_info = new c_struct_parse_info ();

  /* FIXME: This will issue a warning for a use of a type defined
     within a statement expr used within sizeof, et. al.  This is not
     terribly serious as C++ doesn't permit statement exprs within
     sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));

  return ref;
}
/* Process the specs, declarator and width (NULL if omitted)
of a structure component, returning a FIELD_DECL node.
WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
DECL_ATTRS is as for grokdeclarator.
LOC is the location of the structure component.
This is done during the parsing of the struct declaration.
The FIELD_DECL nodes are chained together and the lot of them
are ultimately passed to `build_struct' to make the RECORD_TYPE node. */
tree
grokfield (location_t loc,
	   struct c_declarator *declarator, struct c_declspecs *declspecs,
	   tree width, tree *decl_attrs)
{
  tree value;

  if (declarator->kind == cdk_id && declarator->u.id.id == NULL_TREE
      && width == NULL_TREE)
    {
      /* This is an unnamed decl.

	 If we have something of the form "union { list } ;" then this
	 is the anonymous union extension.  Similarly for struct.

	 If this is something of the form "struct foo;", then
	   If MS or Plan 9 extensions are enabled, this is handled as
	     an anonymous struct.
	   Otherwise this is a forward declaration of a structure tag.

	 If this is something of the form "foo;" and foo is a TYPE_DECL, then
	   If foo names a structure or union without a tag, then this
	     is an anonymous struct (this is permitted by C11).
	   If MS or Plan 9 extensions are enabled and foo names a
	     structure, then again this is an anonymous struct.
	   Otherwise this is an error.

	 Oh what a horrid tangled web we weave.  I wonder if MS consciously
	 took this from Plan 9 or if it was an accident of implementation
	 that took root before someone noticed the bug...  */

      tree type = declspecs->type;
      bool ok = false;

      if (RECORD_OR_UNION_TYPE_P (type)
	  && (flag_ms_extensions
	      || flag_plan9_extensions
	      || !declspecs->typedef_p))
	{
	  if (flag_ms_extensions || flag_plan9_extensions)
	    ok = true;
	  else if (TYPE_NAME (type) == NULL)
	    ok = true;
	  else
	    ok = false;
	}
      if (!ok)
	{
	  pedwarn (loc, 0, "declaration does not declare anything");
	  return NULL_TREE;
	}
      /* Anonymous struct/union members are a C11 feature; pedwarn in
	 earlier language modes.  */
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C99 doesn%'t support unnamed structs/unions");
      else
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C90 doesn%'t support unnamed structs/unions");
    }

  value = grokdeclarator (declarator, declspecs, FIELD, false,
			  width ? &width : NULL, decl_attrs, NULL, NULL,
			  DEPRECATED_NORMAL);

  finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
  /* For a bit-field, stash the width expression in DECL_INITIAL; it is
     consumed when the enclosing struct is laid out.  */
  DECL_INITIAL (value) = width;
  if (width)
    SET_DECL_C_BIT_FIELD (value);

  if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
    {
      /* If we currently have a binding for this field, set the
	 in_struct field in the binding, so that we warn about lookups
	 which find it.  */
      struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
      if (b != NULL)
	{
	  /* If the in_struct field is not yet set, push it on a list
	     to be cleared when this struct is finished.  */
	  if (!b->in_struct)
	    {
	      struct_parse_info->fields.safe_push (b);
	      b->in_struct = 1;
	    }
	}
    }

  return value;
}
/* Subroutine of detect_field_duplicates: return whether X and Y,
which are both fields in the same struct, have duplicate field
names. */
static bool
is_duplicate_field (tree x, tree y)
{
if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
return true;
/* When using -fplan9-extensions, an anonymous field whose name is a
typedef can duplicate a field name. */
if (flag_plan9_extensions
&& (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
{
tree xt, xn, yt, yn;
xt = TREE_TYPE (x);
if (DECL_NAME (x) != NULL_TREE)
xn = DECL_NAME (x);
else if (RECORD_OR_UNION_TYPE_P (xt)
&& TYPE_NAME (xt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
xn = DECL_NAME (TYPE_NAME (xt));
else
xn = NULL_TREE;
yt = TREE_TYPE (y);
if (DECL_NAME (y) != NULL_TREE)
yn = DECL_NAME (y);
else if (RECORD_OR_UNION_TYPE_P (yt)
&& TYPE_NAME (yt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
yn = DECL_NAME (TYPE_NAME (yt));
else
yn = NULL_TREE;
if (xn != NULL_TREE && xn == yn)
return true;
}
return false;
}
/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
to HTAB, giving errors for any duplicates. */
static void
detect_field_duplicates_hash (tree fieldlist,
			      hash_table<nofree_ptr_hash <tree_node> > *htab)
{
  for (tree field = fieldlist; field != NULL_TREE; field = DECL_CHAIN (field))
    {
      tree name = DECL_NAME (field);

      if (name != NULL_TREE)
	{
	  /* Named field: insert the (interned) identifier; a filled
	     slot means we saw this name before.  */
	  tree_node **slot = htab->find_slot (name, INSERT);
	  if (*slot)
	    {
	      error ("duplicate member %q+D", field);
	      DECL_NAME (field) = NULL_TREE;
	    }
	  *slot = name;
	}
      else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
	{
	  /* Anonymous struct/union member: its own fields live in the
	     enclosing namespace, so recurse into them.  */
	  detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (field)),
					htab);

	  /* When using -fplan9-extensions, an anonymous field whose
	     name is a typedef can duplicate a field name.  */
	  if (flag_plan9_extensions
	      && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
	      && TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL)
	    {
	      tree tdname = DECL_NAME (TYPE_NAME (TREE_TYPE (field)));
	      tree_node **slot = htab->find_slot (tdname, INSERT);
	      if (*slot)
		error ("duplicate member %q+D",
		       TYPE_NAME (TREE_TYPE (field)));
	      *slot = tdname;
	    }
	}
    }
}
/* Generate an error for any duplicate field names in FIELDLIST. Munge
the list such that this does not present a problem later. */
/* Generate an error for any duplicate field names in FIELDLIST.  Munge
   the list such that this does not present a problem later (duplicates
   get their DECL_NAME cleared).  Uses an O(n^2) scan for small structs
   and a hash table otherwise.  */
static void
detect_field_duplicates (tree fieldlist)
{
  tree x, y;
  int timeout = 10;

  /* If the struct is the list of instance variables of an Objective-C
     class, then we need to check all the instance variables of
     superclasses when checking for duplicates (since you can't have
     an instance variable in a subclass with the same name as an
     instance variable in a superclass).  We pass on this job to the
     Objective-C compiler.  objc_detect_field_duplicates() will return
     false if we are not checking the list of instance variables and
     the C frontend should proceed with the standard field duplicate
     checks.  If we are checking the list of instance variables, the
     ObjC frontend will do the check, emit the errors if needed, and
     then return true.  */
  if (c_dialect_objc ())
    if (objc_detect_field_duplicates (false))
      return;

  /* First, see if there are more than "a few" fields.
     This is trivially true if there are zero or one fields.  */
  if (!fieldlist || !DECL_CHAIN (fieldlist))
    return;
  x = fieldlist;
  do {
    timeout--;
    /* An anonymous struct/union forces the hash path, since its inner
       fields must be checked too.  */
    if (DECL_NAME (x) == NULL_TREE
	&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
      timeout = 0;
    x = DECL_CHAIN (x);
  } while (timeout > 0 && x);

  /* If there were "few" fields and no anonymous structures or unions,
     avoid the overhead of allocating a hash table.  Instead just do
     the nested traversal thing.  */
  if (timeout > 0)
    {
      for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x))
	/* When using -fplan9-extensions, we can have duplicates
	   between typedef names and fields.  (If DECL_NAME (x) is
	   null the first disjunct already failed, so no separate
	   null check is needed in the second.)  */
	if (DECL_NAME (x)
	    || (flag_plan9_extensions
		&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))
		&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
		&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL))
	  {
	    /* Use DECL_CHAIN for decl chains, consistent with the
	       outer loop (was TREE_CHAIN; same links, clearer intent).  */
	    for (y = fieldlist; y != x; y = DECL_CHAIN (y))
	      if (is_duplicate_field (y, x))
		{
		  error ("duplicate member %q+D", x);
		  DECL_NAME (x) = NULL_TREE;
		}
	  }
    }
  else
    {
      hash_table<nofree_ptr_hash <tree_node> > htab (37);
      detect_field_duplicates_hash (fieldlist, &htab);
    }
}
/* Finish up struct info used by -Wc++-compat. */
static void
warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code,
			       location_t record_loc)
{
  unsigned int ix;
  tree x;
  struct c_binding *b;

  /* An empty struct/union has size 0 in C but size 1 in C++, which can
     silently change layouts and sizeof results; warn about it.  */
  if (fieldlist == NULL_TREE)
    {
      if (code == RECORD_TYPE)
	warning_at (record_loc, OPT_Wc___compat,
		    "empty struct has size 0 in C, size 1 in C++");
      else
	warning_at (record_loc, OPT_Wc___compat,
		    "empty union has size 0 in C, size 1 in C++");
    }

  /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
     the current struct.  We do this now at the end of the struct
     because the flag is used to issue visibility warnings, and we
     only want to issue those warnings if the type is referenced
     outside of the struct declaration.  */
  FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
    C_TYPE_DEFINED_IN_STRUCT (x) = 1;

  /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
     typedefs used when declaring fields in this struct.  If the name
     of any of the fields is also a typedef name then the struct would
     not parse in C++, because the C++ lookup rules say that the
     typedef name would be looked up in the context of the struct, and
     would thus be the field rather than the typedef.  */
  if (!struct_parse_info->typedefs_seen.is_empty ()
      && fieldlist != NULL_TREE)
    {
      /* Use a hash_set<tree> using the name of the typedef.  We can use
	 a hash_set<tree> because identifiers are interned.  */
      hash_set<tree> tset;

      FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
	tset.add (DECL_NAME (x));

      for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE
	      && tset.contains (DECL_NAME (x)))
	    {
	      warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat,
			  ("using %qD as both field and typedef name is "
			   "invalid in C++"),
			  x);
	      /* FIXME: It would be nice to report the location where
		 the typedef name is used.  */
	    }
	}
    }

  /* For each field which has a binding and which was not defined in
     an enclosing struct, clear the in_struct field.  */
  FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
    b->in_struct = 0;
}
/* Function to help qsort sort FIELD_DECLs by name order. */
static int
field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
if (DECL_NAME (*x) < DECL_NAME (*y))
return -1;
return 1;
}
/* If this structure or union completes the type of any previous
   variable declaration, lay it out and output its rtl.
   INCOMPLETE_VARS is a TREE_LIST of the affected declarations and
   TOPLEVEL says whether we are at file scope.  */

static void
finish_incomplete_vars (tree incomplete_vars, bool toplevel)
{
  for (tree cur = incomplete_vars; cur != NULL_TREE; cur = TREE_CHAIN (cur))
    {
      tree decl = TREE_VALUE (cur);
      tree type = TREE_TYPE (decl);

      /* Arrays of the now-complete type need their own layout pass.  */
      if (TREE_CODE (type) == ARRAY_TYPE)
	layout_array_type (type);

      if (TREE_CODE (decl) == TYPE_DECL)
	continue;

      relayout_decl (decl);
      if (c_dialect_objc ())
	objc_check_decl (decl);
      rest_of_decl_compilation (decl, toplevel, 0);
    }
}
/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
   LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
   FIELDLIST is a chain of FIELD_DECL nodes for the fields.
   ATTRIBUTES are attributes to be applied to the structure.

   ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
   the struct was started.  */

tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
	       class c_struct_parse_info *enclosing_struct_parse_info)
{
  tree x;
  bool toplevel = file_scope == current_scope;

  /* If this type was previously laid out as a forward reference,
     make sure we lay it out again.  */
  TYPE_SIZE (t) = NULL_TREE;

  decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);

  /* Diagnose aggregates with no (named) members.  A C11 anonymous
     struct/union member counts as a member for this purpose.  */
  if (pedantic)
    {
      for (x = fieldlist; x; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE)
	    break;
	  if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	    break;
	}

      if (x == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "union has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "union has no members");
	    }
	  else
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "struct has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "struct has no members");
	    }
	}
    }

  /* Install struct as DECL_CONTEXT of each field decl.
     Also process specified field sizes, found in the DECL_INITIAL,
     storing 0 there after the type has been changed to precision equal
     to its width, rather than the precision of the specified standard
     type.  (Correct layout requires the original type to have been preserved
     until now.)  */
  bool saw_named_field = false;
  for (x = fieldlist; x; x = DECL_CHAIN (x))
    {
      /* Fields already diagnosed as erroneous are skipped entirely.  */
      if (TREE_TYPE (x) == error_mark_node)
	continue;

      DECL_CONTEXT (x) = t;

      /* If any field is const, the structure type is pseudo-const.  */
      if (TREE_READONLY (x))
	C_TYPE_FIELDS_READONLY (t) = 1;
      else
	{
	  /* A field that is pseudo-const makes the structure likewise.  */
	  tree t1 = strip_array_types (TREE_TYPE (x));
	  if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1))
	    C_TYPE_FIELDS_READONLY (t) = 1;
	}

      /* Any field that is volatile means variables of this type must be
	 treated in some ways as volatile.  */
      if (TREE_THIS_VOLATILE (x))
	C_TYPE_FIELDS_VOLATILE (t) = 1;

      /* Any field of nominal variable size implies structure is too.  */
      if (C_DECL_VARIABLE_SIZE (x))
	C_TYPE_VARIABLE_SIZE (t) = 1;

      /* The declared bit-field width is held in DECL_INITIAL at this
	 point; give the field its size now.  The type is rewritten to
	 the exact width in the second pass below.  */
      if (DECL_C_BIT_FIELD (x))
	{
	  unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
	  DECL_SIZE (x) = bitsize_int (width);
	  DECL_BIT_FIELD (x) = 1;
	}

      if (TYPE_PACKED (t)
	  && (DECL_BIT_FIELD (x)
	      || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
	DECL_PACKED (x) = 1;

      /* Detect flexible array member in an invalid context.  */
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in union");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (DECL_CHAIN (x) != NULL_TREE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member not at end of struct");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (!saw_named_field)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in a struct with no named "
			"members");
	      TREE_TYPE (x) = error_mark_node;
	    }
	}

      if (pedantic && TREE_CODE (t) == RECORD_TYPE
	  && flexible_array_type_p (TREE_TYPE (x)))
	pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
		 "invalid use of structure with flexible array member");

      if (DECL_NAME (x)
	  || RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	saw_named_field = true;
    }

  detect_field_duplicates (fieldlist);

  /* Now we have the nearly final fieldlist.  Record it,
     then lay out the structure or union (including the fields).  */

  TYPE_FIELDS (t) = fieldlist;

  maybe_apply_pragma_scalar_storage_order (t);

  layout_type (t);

  if (TYPE_SIZE_UNIT (t)
      && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
      && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
      && !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
    error ("type %qT is too large", t);

  /* Give bit-fields their proper types and rewrite the type of array fields
     with scalar component if the enclosing type has reverse storage order.  */
  for (tree field = fieldlist; field; field = DECL_CHAIN (field))
    {
      /* A non-NULL DECL_INITIAL still marks a bit-field here; it is
	 cleared once the exact-width type has been installed.  */
      if (TREE_CODE (field) == FIELD_DECL
	  && DECL_INITIAL (field)
	  && TREE_TYPE (field) != error_mark_node)
	{
	  unsigned HOST_WIDE_INT width
	    = tree_to_uhwi (DECL_INITIAL (field));
	  tree type = TREE_TYPE (field);
	  if (width != TYPE_PRECISION (type))
	    {
	      TREE_TYPE (field)
		= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
	      SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
	    }
	  DECL_INITIAL (field) = NULL_TREE;
	}
      else if (TYPE_REVERSE_STORAGE_ORDER (t)
	       && TREE_CODE (field) == FIELD_DECL
	       && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
	{
	  tree ftype = TREE_TYPE (field);
	  tree ctype = strip_array_types (ftype);
	  if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode)
	    {
	      /* Copy every level of the array type so the reverse
		 storage order flag can be set without affecting other
		 uses of the same type.  */
	      tree fmain_type = TYPE_MAIN_VARIANT (ftype);
	      tree *typep = &fmain_type;
	      do {
		*typep = build_distinct_type_copy (*typep);
		TYPE_REVERSE_STORAGE_ORDER (*typep) = 1;
		typep = &TREE_TYPE (*typep);
	      } while (TREE_CODE (*typep) == ARRAY_TYPE);
	      TREE_TYPE (field)
		= c_build_qualified_type (fmain_type, TYPE_QUALS (ftype));
	    }
	}
    }

  /* Now we have the truly final field list.
     Store it in this type and in the variants.  */
  TYPE_FIELDS (t) = fieldlist;

  /* If there are lots of fields, sort so we can look through them fast.
     We arbitrarily consider 16 or more elts to be "a lot".  */
  {
    int len = 0;

    for (x = fieldlist; x; x = DECL_CHAIN (x))
      {
	if (len > 15 || DECL_NAME (x) == NULL)
	  break;
	len += 1;
      }

    if (len > 15)
      {
	tree *field_array;
	struct lang_type *space;
	struct sorted_fields_type *space2;

	len += list_length (x);

	/* Use the same allocation policy here that make_node uses, to
	  ensure that this lives as long as the rest of the struct decl.
	  All decls in an inline function need to be saved.  */

	space = ggc_cleared_alloc<struct lang_type> ();
	space2 = (sorted_fields_type *) ggc_internal_alloc
	  (sizeof (struct sorted_fields_type) + len * sizeof (tree));

	len = 0;
	space->s = space2;
	field_array = &space2->elts[0];
	for (x = fieldlist; x; x = DECL_CHAIN (x))
	  {
	    field_array[len++] = x;

	    /* If there is anonymous struct or union, break out of the loop.  */
	    if (DECL_NAME (x) == NULL)
	      break;
	  }
	/* Found no anonymous struct/union.  Add the TYPE_LANG_SPECIFIC.  */
	if (x == NULL)
	  {
	    TYPE_LANG_SPECIFIC (t) = space;
	    TYPE_LANG_SPECIFIC (t)->s->len = len;
	    field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
	    qsort (field_array, len, sizeof (tree), field_decl_cmp);
	  }
      }
  }

  /* If this was supposed to be a transparent union, but we can't
     make it one, warn and turn off the flag.  */
  if (TREE_CODE (t) == UNION_TYPE
      && TYPE_TRANSPARENT_AGGR (t)
      && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
    {
      TYPE_TRANSPARENT_AGGR (t) = 0;
      warning_at (loc, 0, "union cannot be made transparent");
    }

  /* Propagate the computed properties to every variant of the type.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
  for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
    {
      TYPE_FIELDS (x) = TYPE_FIELDS (t);
      TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
      TYPE_TRANSPARENT_AGGR (x) = TYPE_TRANSPARENT_AGGR (t);
      C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
      C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
      C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
      C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE;
    }

  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  if (TYPE_STUB_DECL (t))
    DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (t, toplevel);

  finish_incomplete_vars (incomplete_vars, toplevel);

  /* If we're inside a function proper, i.e. not file-scope and not still
     parsing parameters, then arrange for the size of a variable sized type
     to be bound now.  */
  if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
    add_stmt (build_stmt (loc,
			  DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t)));

  if (warn_cxx_compat)
    warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc);

  /* This struct's parse is finished; restore the enclosing struct's
     bookkeeping (NULL at the outermost level).  */
  delete struct_parse_info;

  struct_parse_info = enclosing_struct_parse_info;

  /* If this struct is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (t);

  return t;
}
/* Side channel between resort_sorted_fields and resort_field_decl_cmp:
   qsort comparison functions take no context argument, so the pointer
   relocation operator and its cookie are stashed here.  */
static struct {
  gt_pointer_operator new_value;
  void *cookie;
} resort_data;
/* This routine compares two fields like field_decl_cmp but using the
pointer operator in resort_data. */
static int
resort_field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
{
tree d1 = DECL_NAME (*x);
tree d2 = DECL_NAME (*y);
resort_data.new_value (&d1, resort_data.cookie);
resort_data.new_value (&d2, resort_data.cookie);
if (d1 < d2)
return -1;
}
return 1;
}
/* Resort DECL_SORTED_FIELDS because pointers have been reordered.
   OBJ is the sorted_fields_type to re-sort; NEW_VALUE/COOKIE describe
   the pointer relocation that invalidated the old ordering.  */

void
resort_sorted_fields (void *obj,
		      void * ARG_UNUSED (orig_obj),
		      gt_pointer_operator new_value,
		      void *cookie)
{
  struct sorted_fields_type *fields = (struct sorted_fields_type *) obj;

  /* Stash the relocation callback where the comparator can reach it.  */
  resort_data.new_value = new_value;
  resort_data.cookie = cookie;

  qsort (fields->elts, fields->len, sizeof (tree), resort_field_decl_cmp);
}
/* Lay out the type T, and its element type, and so on.  Inner element
   types must be laid out before the arrays that contain them.  */

static void
layout_array_type (tree t)
{
  tree elt_type = TREE_TYPE (t);

  if (TREE_CODE (elt_type) == ARRAY_TYPE)
    layout_array_type (elt_type);
  layout_type (t);
}
/* Begin compiling the definition of an enumeration type.
   NAME is its name (or null if anonymous).
   LOC is the enum's location.
   Returns the type object, as yet incomplete.
   Also records info about it so that build_enumerator
   may be used to declare the individual values as they are read.  */

tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
  tree enumtype = NULL_TREE;
  location_t enumloc = UNKNOWN_LOCATION;

  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */

  if (name != NULL_TREE)
    enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc);

  /* No usable previous tag: create a fresh node and push the tag.  */
  if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE)
    {
      enumtype = make_node (ENUMERAL_TYPE);
      pushtag (loc, name, enumtype);
    }
  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  else if (TYPE_STUB_DECL (enumtype))
    {
      enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype));
      DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
    }

  if (C_TYPE_BEING_DEFINED (enumtype))
    error_at (loc, "nested redefinition of %<enum %E%>", name);

  C_TYPE_BEING_DEFINED (enumtype) = 1;

  if (TYPE_VALUES (enumtype) != NULL_TREE)
    {
      /* This enum is a named one that has been declared already.  */
      error_at (loc, "redeclaration of %<enum %E%>", name);
      if (enumloc != UNKNOWN_LOCATION)
	inform (enumloc, "originally defined here");

      /* Completely replace its old definition.
	 The old enumerators remain defined, however.  */
      TYPE_VALUES (enumtype) = NULL_TREE;
    }

  /* Enumerator values start at zero and count up.  */
  the_enum->enum_next_value = integer_zero_node;
  the_enum->enum_overflow = 0;

  if (flag_short_enums)
    for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v))
      TYPE_PACKED (v) = 1;

  /* FIXME: This will issue a warning for a use of a type defined
     within sizeof in a statement expr.  This is not terribly serious
     as C++ doesn't permit statement exprs within sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));

  return enumtype;
}
/* After processing and defining all the values of an enumeration type,
   install their decls in the enumeration type and finish it off.
   ENUMTYPE is the type object, VALUES a list of decl-value pairs,
   and ATTRIBUTES are the specified attributes.
   Returns ENUMTYPE.  */

tree
finish_enum (tree enumtype, tree values, tree attributes)
{
  tree pair, tem;
  tree minnode = NULL_TREE, maxnode = NULL_TREE;
  int precision;
  signop sign;
  bool toplevel = (file_scope == current_scope);
  struct lang_type *lt;

  decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);

  /* Calculate the maximum value of any enumerator in this type.  */
  if (values == error_mark_node)
    minnode = maxnode = integer_zero_node;
  else
    {
      minnode = maxnode = TREE_VALUE (values);
      for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
	{
	  tree value = TREE_VALUE (pair);
	  if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }

  /* Construct the final type of this enumeration.  It is the same
     as one of the integral types - the narrowest one that fits, except
     that normally we only go as narrow as int - and signed iff any of
     the values are negative.  */
  sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
  precision = MAX (tree_int_cst_min_precision (minnode, sign),
		   tree_int_cst_min_precision (maxnode, sign));

  /* If the precision of the type was specified with an attribute and it
     was too small, give an error.  Otherwise, use it.  */
  if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes))
    {
      if (precision > TYPE_PRECISION (enumtype))
	{
	  TYPE_PRECISION (enumtype) = 0;
	  error ("specified mode too small for enumerated values");
	}
      else
	precision = TYPE_PRECISION (enumtype);
    }
  else
    TYPE_PRECISION (enumtype) = 0;

  /* Pick the underlying integer type: a sized one when the enum is
     packed, exceeds int, or has an attribute-specified precision;
     otherwise plain (unsigned) int.  */
  if (TYPE_PACKED (enumtype)
      || precision > TYPE_PRECISION (integer_type_node)
      || TYPE_PRECISION (enumtype))
    {
      tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
      if (tem == NULL)
	{
	  warning (0, "enumeration values exceed range of largest integer");
	  tem = long_long_integer_type_node;
	}
    }
  else
    tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;

  /* Copy the layout properties of the chosen integer type, then lay
     the enum type out (TYPE_SIZE is cleared so layout_type redoes it).  */
  TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
  TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
  TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
  SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem));
  TYPE_SIZE (enumtype) = NULL_TREE;
  TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);

  layout_type (enumtype);

  if (values != error_mark_node)
    {
      /* Change the type of the enumerators to be the enum type.  We
	 need to do this irrespective of the size of the enum, for
	 proper type checking.  Replace the DECL_INITIALs of the
	 enumerators, and the value slots of the list, with copies
	 that have the enum type; they cannot be modified in place
	 because they may be shared (e.g.  integer_zero_node) Finally,
	 change the purpose slots to point to the names of the decls.  */
      for (pair = values; pair; pair = TREE_CHAIN (pair))
	{
	  tree enu = TREE_PURPOSE (pair);
	  tree ini = DECL_INITIAL (enu);

	  TREE_TYPE (enu) = enumtype;

	  /* The ISO C Standard mandates enumerators to have type int,
	     even though the underlying type of an enum type is
	     unspecified.  However, GCC allows enumerators of any
	     integer type as an extensions.  build_enumerator()
	     converts any enumerators that fit in an int to type int,
	     to avoid promotions to unsigned types when comparing
	     integers with enumerators that fit in the int range.
	     When -pedantic is given, build_enumerator() would have
	     already warned about those that don't fit.  Here we
	     convert the rest to the enumerator type.  */
	  if (TREE_TYPE (ini) != integer_type_node)
	    ini = convert (enumtype, ini);

	  DECL_INITIAL (enu) = ini;
	  TREE_PURPOSE (pair) = DECL_NAME (enu);
	  TREE_VALUE (pair) = ini;
	}

      TYPE_VALUES (enumtype) = values;
    }

  /* Record the min/max values so that we can warn about bit-field
     enumerations that are too small for the values.  */
  lt = ggc_cleared_alloc<struct lang_type> ();
  lt->enum_min = minnode;
  lt->enum_max = maxnode;
  TYPE_LANG_SPECIFIC (enumtype) = lt;

  /* Fix up all variant types of this enum type.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (enumtype));
  for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
    {
      C_TYPE_INCOMPLETE_VARS (tem) = NULL_TREE;
      if (tem == enumtype)
	continue;
      TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
      TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
      TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
      TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
      TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
      SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
      TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
      SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype));
      TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
      TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
      TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
    }

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, toplevel);

  finish_incomplete_vars (incomplete_vars, toplevel);

  /* If this enum is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (enumtype);

  C_TYPE_BEING_DEFINED (enumtype) = 0;

  return enumtype;
}
/* Build and install a CONST_DECL for one value of the
   current enumeration type (one that was begun with start_enum).
   DECL_LOC is the location of the enumerator.
   LOC is the location of the '=' operator if any, DECL_LOC otherwise.
   Return a tree-list containing the CONST_DECL and its value.
   Assignment of sequential values by default is handled here.  */

tree
build_enumerator (location_t decl_loc, location_t loc,
		  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;

  /* Validate and default VALUE.  */

  if (value != NULL_TREE)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
	 undeclared identifier) - just ignore the value expression.  */
      if (value == error_mark_node)
	value = NULL_TREE;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
	{
	  error_at (loc, "enumerator value for %qE is not an integer constant",
		    name);
	  value = NULL_TREE;
	}
      else
	{
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      /* Try folding to a constant; if that succeeds the value
		 is usable but not a strict constant expression.  */
	      value = c_fully_fold (value, false, NULL);
	      if (TREE_CODE (value) == INTEGER_CST)
		pedwarn (loc, OPT_Wpedantic,
			 "enumerator value for %qE is not an integer "
			 "constant expression", name);
	    }
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      error ("enumerator value for %qE is not an integer constant",
		     name);
	      value = NULL_TREE;
	    }
	  else
	    {
	      value = default_conversion (value);
	      constant_expression_warning (value);
	    }
	}
    }

  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == NULL_TREE)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
	error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
	     "ISO C restricts enumerator values to range of %<int%>");

  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extensions.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when comparing
     integers with enumerators that fit in the int range.  When
     -pedantic is given, we would have already warned about those that
     don't fit.  We have to do this here rather than in finish_enum
     because this value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);

  /* Set basis for default for next value.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
		       PLUS_EXPR, value, integer_one_node, false);
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);

  /* Now create a declaration for the enum value name.  */

  type = TREE_TYPE (value);
  /* The decl's type is at least as wide as int, unsigned only when the
     value's type is at least int-wide and unsigned.  */
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
				      TYPE_PRECISION (integer_type_node)),
				 (TYPE_PRECISION (type)
				  >= TYPE_PRECISION (integer_type_node)
				  && TYPE_UNSIGNED (type)));

  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);

  return tree_cons (decl, value, NULL_TREE);
}
/* Implement LANG_HOOKS_SIMULATE_ENUM_DECL.  Declare an enum named NAME
   at LOC whose enumerators are given by VALUES, using the same
   start_enum/build_enumerator/finish_enum path as parsed code.  */

tree
c_simulate_enum_decl (location_t loc, const char *name,
		      vec<string_int_pair> values)
{
  location_t saved_loc = input_location;
  input_location = loc;

  struct c_enum_contents the_enum;
  tree enumtype = start_enum (loc, &the_enum, get_identifier (name));

  /* Chain the enumerators up in reverse and flip the list at the end.  */
  tree value_chain = NULL_TREE;
  for (unsigned int i = 0; i < values.length (); i++)
    {
      tree decl = build_enumerator (loc, loc, &the_enum,
				    get_identifier (values[i].first),
				    build_int_cst (integer_type_node,
						   values[i].second));
      TREE_CHAIN (decl) = value_chain;
      value_chain = decl;
    }

  finish_enum (enumtype, nreverse (value_chain), NULL_TREE);

  input_location = saved_loc;
  return enumtype;
}
/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
   the declaration; they describe the function's name and the type it returns,
   but twisted together in a fashion that parallels the syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns true on success.  If the DECLARATOR is not suitable for a function
   (it defines a datum instead), we return false to report a parse error.  */

bool
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
		tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;

  current_function_returns_value = 0;  /* Assume, until we see it does.  */
  current_function_returns_null = 0;
  current_function_returns_abnormally = 0;
  warn_about_return_type = 0;
  c_switch_stack = NULL;

  /* Indicate no valid break/continue context.  */
  in_statement = 0;

  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
			  &attributes, NULL, NULL, DEPRECATED_NORMAL);
  invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);

  /* If the declarator is not suitable for a function definition,
     cause a syntax error.  */
  if (decl1 == NULL_TREE
      || TREE_CODE (decl1) != FUNCTION_DECL)
    return false;

  loc = DECL_SOURCE_LOCATION (decl1);

  /* A nested function is not global.  */
  if (current_function_decl != NULL_TREE)
    TREE_PUBLIC (decl1) = 0;

  c_decl_attributes (&decl1, attributes, 0);

  if (DECL_DECLARED_INLINE_P (decl1)
      && DECL_UNINLINABLE (decl1)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
    warning_at (loc, OPT_Wattributes,
		"inline function %qD given attribute %qs",
		decl1, "noinline");

  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl1) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
	  || current_function_decl))
    {
      if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
    }

  announce_function (decl1);

  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
    {
      error_at (loc, "return type is an incomplete type");
      /* Make it return void instead.  */
      TREE_TYPE (decl1)
	= build_function_type (void_type_node,
			       TYPE_ARG_TYPES (TREE_TYPE (decl1)));
    }

  if (warn_about_return_type)
    warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
			   : (warn_return_type > 0 ? OPT_Wreturn_type
			      : OPT_Wimplicit_int),
		      "return type defaults to %<int%>");

  /* Make the init_value nonzero so pushdecl knows this is not tentative.
     error_mark_node is replaced below (in pop_scope) with the BLOCK.  */
  DECL_INITIAL (decl1) = error_mark_node;

  /* If this definition isn't a prototype and we had a prototype declaration
     before, copy the arg type info from that prototype.  */
  old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
  if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
    old_decl = NULL_TREE;

  current_function_prototype_locus = UNKNOWN_LOCATION;
  current_function_prototype_built_in = false;
  current_function_prototype_arg_types = NULL_TREE;

  if (!prototype_p (TREE_TYPE (decl1)))
    {
      if (old_decl != NULL_TREE
	  && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
	  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
			TREE_TYPE (TREE_TYPE (old_decl))))
	{
	  if (stdarg_p (TREE_TYPE (old_decl)))
	    {
	      auto_diagnostic_group d;
	      warning_at (loc, 0, "%q+D defined as variadic function "
			  "without prototype", decl1);
	      locate_old_decl (old_decl);
	    }
	  TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
					      TREE_TYPE (decl1));
	  current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
	  current_function_prototype_built_in
	    = C_DECL_BUILTIN_PROTOTYPE (old_decl);
	  current_function_prototype_arg_types
	    = TYPE_ARG_TYPES (TREE_TYPE (decl1));
	}
      if (TREE_PUBLIC (decl1))
	{
	  /* If there is an external prototype declaration of this
	     function, record its location but do not copy information
	     to this decl.  This may be an invisible declaration
	     (built-in or in a scope which has finished) or simply
	     have more refined argument types than any declaration
	     found above.  */
	  struct c_binding *b;
	  for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
	    if (B_IN_SCOPE (b, external_scope))
	      break;
	  if (b)
	    {
	      tree ext_decl, ext_type;
	      ext_decl = b->decl;
	      ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
	      if (TREE_CODE (ext_type) == FUNCTION_TYPE
		  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
				TREE_TYPE (ext_type)))
		{
		  current_function_prototype_locus
		    = DECL_SOURCE_LOCATION (ext_decl);
		  current_function_prototype_built_in
		    = C_DECL_BUILTIN_PROTOTYPE (ext_decl);
		  current_function_prototype_arg_types
		    = TYPE_ARG_TYPES (ext_type);
		}
	    }
	}
    }

  /* Optionally warn of old-fashioned def with no previous prototype.  */
  if (warn_strict_prototypes
      && old_decl != error_mark_node
      && !prototype_p (TREE_TYPE (decl1))
      && C_DECL_ISNT_PROTOTYPE (old_decl))
    warning_at (loc, OPT_Wstrict_prototypes,
		"function declaration isn%'t a prototype");
  /* Optionally warn of any global def with no previous prototype.  */
  else if (warn_missing_prototypes
	   && old_decl != error_mark_node
	   && TREE_PUBLIC (decl1)
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && C_DECL_ISNT_PROTOTYPE (old_decl)
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_prototypes,
		"no previous prototype for %qD", decl1);
  /* Optionally warn of any def with no previous prototype
     if the function has already been used.  */
  else if (warn_missing_prototypes
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && !prototype_p (TREE_TYPE (old_decl)))
    warning_at (loc, OPT_Wmissing_prototypes,
		"%qD was used with no prototype before its definition", decl1);
  /* Optionally warn of any global def with no previous declaration.  */
  else if (warn_missing_declarations
	   && TREE_PUBLIC (decl1)
	   && old_decl == NULL_TREE
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_declarations,
		"no previous declaration for %qD",
		decl1);
  /* Optionally warn of any def with no previous declaration
     if the function has already been used.  */
  else if (warn_missing_declarations
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && C_DECL_IMPLICIT (old_decl))
    warning_at (loc, OPT_Wmissing_declarations,
		"%qD was used with no declaration before its definition", decl1);

  /* This function exists in static storage.
     (This does not mean `static' in the C sense!)  */
  TREE_STATIC (decl1) = 1;

  /* This is the earliest point at which we might know the assembler
     name of the function.  Thus, if it's set before this, die horribly.  */
  gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));

  /* If #pragma weak was used, mark the decl weak now.  */
  if (current_scope == file_scope)
    maybe_apply_pragma_weak (decl1);

  /* Warn for unlikely, improbable, or stupid declarations of `main'.  */
  if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
    {
      if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
	  != integer_type_node)
	pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
      else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
	pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
		 decl1);

      check_main_parameter_types (decl1);

      if (!TREE_PUBLIC (decl1))
	pedwarn (loc, OPT_Wmain,
		 "%qD is normally a non-static function", decl1);
    }

  tree parms = current_function_arg_info->parms;
  if (old_decl)
    {
      location_t origloc = DECL_SOURCE_LOCATION (old_decl);
      warn_parm_array_mismatch (origloc, old_decl, parms);
    }

  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a FUNCTION_DECL,
     use the old decl.  */

  current_function_decl = pushdecl (decl1);

  /* was "decl_attributes (¤t_function_decl, ...)": the '&curren'
     of "&current_function_decl" had been mis-decoded as the HTML
     entity for the currency sign, which does not compile.  */
  if (tree access = build_attr_access_from_parms (parms, false))
    decl_attributes (&current_function_decl, access, 0, old_decl);

  push_scope ();
  declare_parm_level ();

  /* Build the RESULT_DECL that will hold the function's return value.  */
  restype = TREE_TYPE (TREE_TYPE (current_function_decl));
  resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
  DECL_ARTIFICIAL (resdecl) = 1;
  DECL_IGNORED_P (resdecl) = 1;
  DECL_RESULT (current_function_decl) = resdecl;

  start_fname_decls ();

  return true;
}
/* Subroutine of store_parm_decls which handles new-style function
   definitions (prototype format).  The parms already have decls, so we
   need only record them as in effect and complain if any redundant
   old-style parm decls were written.  */

static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  tree decl;
  c_arg_tag *tag;
  unsigned ix;

  /* Any bindings already present at this point are stray old-style
     parameter declarations; discard them by recreating the scope.  */
  if (current_scope->bindings)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"old-style parameter declarations in prototyped "
		"function definition");

      /* Get rid of the old-style declarations.  */
      pop_scope ();
      push_scope ();
    }
  /* Don't issue this warning for nested functions, and don't issue this
     warning if we got here because ARG_INFO_TYPES was error_mark_node
     (this happens when a function definition has just an ellipsis in
     its parameter list).  */
  else if (!in_system_header_at (input_location)
	   && !current_function_scope
	   && arg_info->types != error_mark_node)
    warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
		"traditional C rejects ISO C style function definitions");

  /* Now make all the parameter declarations visible in the function body.
     We can bypass most of the grunt work of pushdecl.  */
  for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	{
	  bind (DECL_NAME (decl), decl, current_scope,
		/*invisible=*/false, /*nested=*/false,
		UNKNOWN_LOCATION);
	  if (!TREE_USED (decl))
	    warn_if_shadowing (decl);
	}
      else
	error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
    }

  /* Record the parameter list in the function declaration.  */
  DECL_ARGUMENTS (fndecl) = arg_info->parms;

  /* Now make all the ancillary declarations visible, likewise.  */
  for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	bind (DECL_NAME (decl), decl, current_scope,
	      /*invisible=*/false,
	      /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
	      UNKNOWN_LOCATION);
    }

  /* And all the tag declarations.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
    if (tag->id)
      bind (tag->id, tag->type, current_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
/* Subroutine of store_parm_decls which handles old-style function
definitions (separate parameter list and declarations). */
static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  struct c_binding *b;
  tree parm, decl, last;
  tree parmids = arg_info->parms;
  /* Set of PARM_DECLs already matched to a name in the identifier list,
     used to detect duplicates and declarations without a parameter.  */
  hash_set<tree> seen_args;

  /* Diagnose the old-style definition itself (error in C2X pedantic mode,
     warning otherwise), unless it comes from a system header.  */
  if (!in_system_header_at (input_location))
    {
      if (flag_isoc2x)
	pedwarn (DECL_SOURCE_LOCATION (fndecl),
		 OPT_Wold_style_definition, "old-style function definition");
      else
	warning_at (DECL_SOURCE_LOCATION (fndecl),
		    OPT_Wold_style_definition,
		    "old-style function definition");
    }

  if (current_scope->had_vla_unspec)
    error ("%<[*]%> not allowed in other than function prototype scope");

  /* Match each formal parameter name with its declaration.  Save each
     decl in the appropriate TREE_PURPOSE slot of the parmids chain.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    {
      if (TREE_VALUE (parm) == NULL_TREE)
	{
	  error_at (DECL_SOURCE_LOCATION (fndecl),
		    "parameter name missing from parameter list");
	  TREE_PURPOSE (parm) = NULL_TREE;
	  continue;
	}

      b = I_SYMBOL_BINDING (TREE_VALUE (parm));
      if (b && B_IN_CURRENT_SCOPE (b))
	{
	  /* A declaration for this name exists in the parameter scope.  */
	  decl = b->decl;
	  /* Skip erroneous parameters.  */
	  if (decl == error_mark_node)
	    continue;
	  /* If we got something other than a PARM_DECL it is an error.  */
	  if (TREE_CODE (decl) != PARM_DECL)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"%qD declared as a non-parameter", decl);
	      continue;
	    }
	  /* If the declaration is already marked, we have a duplicate
	     name.  Complain and ignore the duplicate.  */
	  else if (seen_args.contains (decl))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"multiple parameters named %qD", decl);
	      TREE_PURPOSE (parm) = NULL_TREE;
	      continue;
	    }
	  /* If the declaration says "void", complain and turn it into
	     an int.  */
	  else if (VOID_TYPE_P (TREE_TYPE (decl)))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"parameter %qD declared with void type", decl);
	      TREE_TYPE (decl) = integer_type_node;
	      DECL_ARG_TYPE (decl) = integer_type_node;
	      layout_decl (decl, 0);
	    }
	  warn_if_shadowing (decl);
	}
      /* If no declaration found, default to int.  */
      else
	{
	  /* FIXME diagnostics: This should be the location of the argument,
	     not the FNDECL.  E.g., for an old-style declaration

	       int f10(v) { blah; }

	     We should use the location of the V, not the F10.
	     Unfortunately, the V is an IDENTIFIER_NODE which has no
	     location.  In the future we need locations for c_arg_info
	     entries.

	     See gcc.dg/Wshadow-3.c for an example of this problem.  */
	  decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
			     PARM_DECL, TREE_VALUE (parm), integer_type_node);
	  DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
	  pushdecl (decl);
	  warn_if_shadowing (decl);

	  /* Implicit int for a parameter: pedwarn in C99 and later,
	     plain warning for C90.  */
	  if (flag_isoc99)
	    pedwarn (DECL_SOURCE_LOCATION (decl),
		     OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
		     decl);
	  else
	    warning_at (DECL_SOURCE_LOCATION (decl),
			OPT_Wmissing_parameter_type,
			"type of %qD defaults to %<int%>", decl);
	}

      TREE_PURPOSE (parm) = decl;
      seen_args.add (decl);
    }

  /* Now examine the parms chain for incomplete declarations
     and declarations with no corresponding names.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      parm = b->decl;
      if (TREE_CODE (parm) != PARM_DECL)
	continue;

      if (TREE_TYPE (parm) != error_mark_node
	  && !COMPLETE_TYPE_P (TREE_TYPE (parm)))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "parameter %qD has incomplete type", parm);
	  TREE_TYPE (parm) = error_mark_node;
	}

      if (!seen_args.contains (parm))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "declaration for parameter %qD but no such parameter",
		    parm);

	  /* Pretend the parameter was not missing.
	     This gets us to a standard state and minimizes
	     further error messages.  */
	  parmids = chainon (parmids, tree_cons (parm, 0, 0));
	}
    }

  /* Chain the declarations together in the order of the list of
     names.  Store that chain in the function decl, replacing the
     list of names.  Update the current scope to match.  */
  DECL_ARGUMENTS (fndecl) = NULL_TREE;

  /* Find the first parameter that has a matched declaration; it becomes
     the head of DECL_ARGUMENTS.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    if (TREE_PURPOSE (parm))
      break;
  if (parm && TREE_PURPOSE (parm))
    {
      last = TREE_PURPOSE (parm);
      DECL_ARGUMENTS (fndecl) = last;

      for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
	if (TREE_PURPOSE (parm))
	  {
	    DECL_CHAIN (last) = TREE_PURPOSE (parm);
	    last = TREE_PURPOSE (parm);
	  }
      DECL_CHAIN (last) = NULL_TREE;
    }

  /* If there was a previous prototype,
     set the DECL_ARG_TYPE of each argument according to
     the type previously specified, and report any mismatches.  */

  if (current_function_prototype_arg_types)
    {
      tree type;
      /* Walk the parameters and the prototype's argument types in
	 lockstep; the loop also continues while only TYPE remains so
	 that a count mismatch can be diagnosed below.  */
      for (parm = DECL_ARGUMENTS (fndecl),
	     type = current_function_prototype_arg_types;
	   parm || (type != NULL_TREE
		    && TREE_VALUE (type) != error_mark_node
		    && TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
	   parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
	{
	  if (parm == NULL_TREE
	      || type == NULL_TREE
	      || (TREE_VALUE (type) != error_mark_node
		  && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node))
	    {
	      if (current_function_prototype_built_in)
		warning_at (DECL_SOURCE_LOCATION (fndecl),
			    0, "number of arguments doesn%'t match "
			    "built-in prototype");
	      else
		{
		  /* FIXME diagnostics: This should be the location of
		     FNDECL, but there is bug when a prototype is
		     declared inside function context, but defined
		     outside of it (e.g., gcc.dg/pr15698-2.c).  In
		     which case FNDECL gets the location of the
		     prototype, not the definition.  */
		  error_at (input_location,
			    "number of arguments doesn%'t match prototype");
		  error_at (current_function_prototype_locus,
			    "prototype declaration");
		}
	      break;
	    }
	  /* Type for passing arg must be consistent with that
	     declared for the arg.  ISO C says we take the unqualified
	     type for parameters declared with qualified type.  */
	  if (TREE_TYPE (parm) != error_mark_node
	      && TREE_VALUE (type) != error_mark_node
	      && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   != TYPE_ATOMIC (TREE_VALUE (type)))
		  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
				 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
	    {
	      if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   == TYPE_ATOMIC (TREE_VALUE (type)))
		  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
		      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
		{
		  /* Adjust argument to match prototype.  E.g. a previous
		     `int foo(float);' prototype causes
		     `int foo(x) float x; {...}' to be treated like
		     `int foo(float x) {...}'.  This is particularly
		     useful for argument types like uid_t.  */
		  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);

		  if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl))
		      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
		      && (TYPE_PRECISION (TREE_TYPE (parm))
			  < TYPE_PRECISION (integer_type_node)))
		    DECL_ARG_TYPE (parm)
		      = c_type_promotes_to (TREE_TYPE (parm));

		  /* ??? Is it possible to get here with a
		     built-in prototype or will it always have
		     been diagnosed as conflicting with an
		     old-style definition and discarded?  */
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				OPT_Wpedantic, "promoted argument %qD "
				"doesn%'t match built-in prototype", parm);
		  else
		    {
		      pedwarn (DECL_SOURCE_LOCATION (parm),
			       OPT_Wpedantic, "promoted argument %qD "
			       "doesn%'t match prototype", parm);
		      pedwarn (current_function_prototype_locus, OPT_Wpedantic,
			       "prototype declaration");
		    }
		}
	      else
		{
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				0, "argument %qD doesn%'t match "
				"built-in prototype", parm);
		  else
		    {
		      error_at (DECL_SOURCE_LOCATION (parm),
				"argument %qD doesn%'t match prototype", parm);
		      error_at (current_function_prototype_locus,
				"prototype declaration");
		    }
		}
	    }
	}
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE;
    }

  /* Otherwise, create a prototype that would match.  */
  else
    {
      /* NOTE: LAST here intentionally shadows the outer LAST; it tracks
	 the tail of the type list being built.  */
      tree actual = NULL_TREE, last = NULL_TREE, type;

      for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
	{
	  type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
	  if (last)
	    TREE_CHAIN (last) = type;
	  else
	    actual = type;
	  last = type;
	}

      /* Terminate the synthesized list with void, making it a proper
	 prototype list.  */
      type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
      if (last)
	TREE_CHAIN (last) = type;
      else
	actual = type;

      /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
	 of the type of this function, but we need to avoid having this
	 affect the types of other similarly-typed functions, so we must
	 first force the generation of an identical (but separate) type
	 node for the relevant function type.  The new node we create
	 will be a variant of the main variant of the original function
	 type.  */
      TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));

      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
    }
}
/* Store parameter declarations passed in ARG_INFO into the current
function declaration. */
/* Store parameter declarations passed in ARG_INFO into the current
   function declaration, by stashing ARG_INFO where store_parm_decls
   expects to find it and then delegating to store_parm_decls.  */

void
store_parm_decls_from (struct c_arg_info *arg_info)
{
  /* store_parm_decls reads (and clears) current_function_arg_info.  */
  current_function_arg_info = arg_info;
  store_parm_decls ();
}
/* Called by walk_tree to look for and update context-less labels
or labels with context in the parent function. */
/* walk_tree callback: give context-less labels (or labels whose context
   is the parent of DATA) the function decl passed in DATA as context.
   DATA is the FUNCTION_DECL to install.  */

static tree
set_labels_context_r (tree *tp, int *walk_subtrees, void *data)
{
  tree fndecl = static_cast<tree> (data);

  if (TREE_CODE (*tp) != LABEL_EXPR)
    return NULL_TREE;

  tree label = LABEL_EXPR_LABEL (*tp);
  if (DECL_CONTEXT (label) == NULL_TREE
      || DECL_CONTEXT (label) == DECL_CONTEXT (fndecl))
    {
      DECL_CONTEXT (label) = fndecl;
      /* Labels don't nest; no need to look inside.  */
      *walk_subtrees = 0;
    }

  return NULL_TREE;
}
/* Store the parameter declarations into the current function declaration.
This is called after parsing the parameter declarations, before
digesting the body of the function.
For an old-style definition, construct a prototype out of the old-style
parameter declarations and inject it into the function's type. */
void
store_parm_decls (void)
{
  tree fndecl = current_function_decl;
  bool proto;

  /* The argument information block for FNDECL.  */
  struct c_arg_info *arg_info = current_function_arg_info;
  /* Consume the pending arg info so it cannot be reused accidentally.  */
  current_function_arg_info = 0;

  /* True if this definition is written with a prototype.  In C2X, an
     empty argument list was converted to (void) in grokparms; in
     older C standard versions, it does not give the function a type
     with a prototype for future calls.  */
  proto = arg_info->types != 0;

  if (proto)
    store_parm_decls_newstyle (fndecl, arg_info);
  else
    store_parm_decls_oldstyle (fndecl, arg_info);

  /* The next call to push_scope will be a function body.  */

  next_is_function_body = true;

  /* Write a record describing this function definition to the prototypes
     file (if requested).  */

  gen_aux_info_record (fndecl, 1, 0, proto);

  /* Initialize the RTL code for the function.  */
  allocate_struct_function (fndecl, false);

  if (warn_unused_local_typedefs)
    cfun->language = ggc_cleared_alloc<language_function> ();

  /* Begin the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = push_stmt_list ();

  /* ??? Insert the contents of the pending sizes list into the function
     to be evaluated.  The only reason left to have this is
	void foo(int n, int array[n++])
     because we throw away the array type in favor of a pointer type, and
     thus won't naturally see the SAVE_EXPR containing the increment.  All
     other pending sizes would be handled by gimplify_parameters.  */
  if (arg_info->pending_sizes)
    {
      /* In very special circumstances, e.g. for code like
	   _Atomic int i = 5;
	   void f (int a[i += 2]) {}
	 we need to execute the atomic assignment on function entry.
	 But in this case, it is not just a straight store, it has the
	 op= form, which means that build_atomic_assign has generated
	 gotos, labels, etc.  Because at that time the function decl
	 for F has not been created yet, those labels do not have any
	 function context.  But we have the fndecl now, so update the
	 labels accordingly.  gimplify_expr would crash otherwise.
	 Or with nested functions the labels could be created with parent
	 function's context, while when the statement is emitted at the
	 start of the nested function, it needs the nested function's
	 context.  */
      walk_tree_without_duplicates (&arg_info->pending_sizes,
				    set_labels_context_r, fndecl);
      add_stmt (arg_info->pending_sizes);
    }
}
/* Store PARM_DECLs in PARMS into scope temporarily. Used for
c_finish_omp_declare_simd for function prototypes. No diagnostics
should be done. */
/* Store PARM_DECLs in PARMS into a freshly pushed scope, binding each
   named parameter and giving it FNDECL as its context.  Used by
   c_finish_omp_declare_simd for function prototypes; no diagnostics
   are issued.  Undone by temp_pop_parm_decls.  */

void
temp_store_parm_decls (tree fndecl, tree parms)
{
  push_scope ();

  tree p = parms;
  while (p != NULL_TREE)
    {
      DECL_CONTEXT (p) = fndecl;
      /* Unnamed parameters need no binding.  */
      if (DECL_NAME (p) != NULL_TREE)
	bind (DECL_NAME (p), p, current_scope,
	      /*invisible=*/false, /*nested=*/false,
	      UNKNOWN_LOCATION);
      p = DECL_CHAIN (p);
    }
}
/* Undo what temp_store_parm_decls did. */
/* Undo what temp_store_parm_decls did: unhook every binding in the
   temporary scope, restore shadowed bindings (and their saved types),
   then pop the scope.  */

void
temp_pop_parm_decls (void)
{
  /* Clear all bindings in this temporary scope, so that
     pop_scope doesn't create a BLOCK.  */
  struct c_binding *b = current_scope->bindings;
  current_scope->bindings = NULL;

  while (b != NULL)
    {
      /* Only parameters (or error placeholders) can live here.  */
      gcc_assert (TREE_CODE (b->decl) == PARM_DECL
		  || b->decl == error_mark_node);
      gcc_assert (I_SYMBOL_BINDING (b->id) == b);

      /* Re-expose whatever this binding shadowed.  */
      struct c_binding *outer = b->shadowed;
      I_SYMBOL_BINDING (b->id) = outer;
      if (outer && outer->u.type)
	TREE_TYPE (outer->decl) = outer->u.type;

      b = free_binding_and_advance (b);
    }

  pop_scope ();
}
/* Finish up a function declaration and compile that function
all the way to assembler language output. Then free the storage
for the function definition.
This is called after parsing the body of the function definition. */
void
finish_function (location_t end_loc)
{
  tree fndecl = current_function_decl;

  if (c_dialect_objc ())
    objc_finish_function ();

  /* Apply default argument promotions to small integral parameters
     when the target promotes prototypes.  */
  if (TREE_CODE (fndecl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
    {
      tree args = DECL_ARGUMENTS (fndecl);
      for (; args; args = DECL_CHAIN (args))
	{
	  tree type = TREE_TYPE (args);
	  if (INTEGRAL_TYPE_P (type)
	      && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
	    DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	}
    }

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
    BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Must mark the RESULT_DECL as being in this function.  */

  if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
    DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* C99 5.1.2.2.3: reaching the } of main returns 0; synthesize the
     implicit return for hosted C99-and-later main.  */
  if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
	 == integer_type_node && flag_isoc99)
    {
      /* Hack.  We don't want the middle-end to warn that this return
	 is unreachable, so we mark its location as special.  Using
	 UNKNOWN_LOCATION has the problem that it gets clobbered in
	 annotate_one_with_locus.  A cleaner solution might be to
	 ensure ! should_carry_locus_p (stmt), but that needs a flag.
      */
      c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
    }

  /* Tie off the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* Complain if there's no return statement only if option specified on
     command line.  */
  if (warn_return_type > 0
      && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
      && !current_function_returns_value && !current_function_returns_null
      /* Don't complain if we are no-return.  */
      && !current_function_returns_abnormally
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      /* Don't warn for main().  */
      && !MAIN_NAME_P (DECL_NAME (fndecl))
      /* Or if they didn't actually specify a return type.  */
      && !C_FUNCTION_IMPLICIT_INT (fndecl)
      /* Normally, with -Wreturn-type, flow will complain, but we might
	 optimize out static functions.  */
      && !TREE_PUBLIC (fndecl)
      && targetm.warn_func_return (fndecl)
      && warning (OPT_Wreturn_type,
		  "no return statement in function returning non-void"))
    TREE_NO_WARNING (fndecl) = 1;

  /* Complain about parameters that are only set, but never otherwise used.  */
  if (warn_unused_but_set_parameter)
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
	   decl;
	   decl = DECL_CHAIN (decl))
	if (TREE_USED (decl)
	    && TREE_CODE (decl) == PARM_DECL
	    && !DECL_READ_P (decl)
	    && DECL_NAME (decl)
	    && !DECL_ARTIFICIAL (decl)
	    && !TREE_NO_WARNING (decl))
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      OPT_Wunused_but_set_parameter,
		      "parameter %qD set but not used", decl);
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter)
    do_warn_unused_parameter (fndecl);

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = end_loc;

  /* Finalize the ELF visibility for the function.  */
  c_determine_visibility (fndecl);

  /* For GNU C extern inline functions disregard inline limits.  */
  if (DECL_EXTERNAL (fndecl)
      && DECL_DECLARED_INLINE_P (fndecl)
      && (flag_gnu89_inline
	  || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl))))
    DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;

  /* Genericize before inlining.  Delay genericizing nested functions
     until their parent function is genericized.  Since finalizing
     requires GENERIC, delay that as well.  */

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
      && !undef_nested_function)
    {
      if (!decl_function_context (fndecl))
	{
	  invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
	  c_genericize (fndecl);

	  /* ??? Objc emits functions after finalizing the compilation unit.
	     This should be cleaned up later and this conditional removed.  */
	  if (symtab->global_info_ready)
	    {
	      cgraph_node::add_new_function (fndecl, false);
	      return;
	    }
	  cgraph_node::finalize_function (fndecl, false);
	}
      else
	{
	  /* Register this function with cgraph just far enough to get it
	    added to our parent's nested function list.  Handy, since the
	    C front end doesn't have such a list.  */
	  (void) cgraph_node::get_create (fndecl);
	}
    }

  if (!decl_function_context (fndecl))
    undef_nested_function = false;

  if (cfun->language != NULL)
    {
      ggc_free (cfun->language);
      cfun->language = NULL;
    }

  /* We're leaving the context of this function, so zap cfun.
     It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, current_function_decl);
  current_function_decl = NULL;
}
/* Check the declarations given in a for-loop for satisfying the C99
constraints. If exactly one such decl is found, return it. LOC is
the location of the opening parenthesis of the for loop. The last
parameter allows you to control the "for loop initial declarations
are only allowed in C99 mode". Normally, you should pass
flag_isoc99 as that parameter. But in some cases (Objective-C
foreach loop, for example) we want to run the checks in this
function even if not in C99 mode, so we allow the caller to turn
off the error about not being in C99 mode.
*/
tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
  struct c_binding *b;
  tree one_decl = NULL_TREE;
  int n_decls = 0;

  if (!turn_off_iso_c99_error)
    {
      /* Emit the std-option hint only once per compilation.  */
      static bool hint = true;
      /* If we get here, declarations have been used in a for loop without
	 the C99 for loop scope.  This doesn't make much sense, so don't
	 allow it.  */
      error_at (loc, "%<for%> loop initial declarations "
		"are only allowed in C99 or C11 mode");
      if (hint)
	{
	  inform (loc,
		  "use option %<-std=c99%>, %<-std=gnu99%>, %<-std=c11%> or "
		  "%<-std=gnu11%> to compile your code");
	  hint = false;
	}
      return NULL_TREE;
    }
  else
    pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<for%> loop "
		 "initial declarations");

  /* C99 subclause 6.8.5 paragraph 3:

       [#3]  The  declaration  part  of  a for statement shall only
       declare identifiers for objects having storage class auto or
       register.

     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all identifiers
     declared must be identifiers for objects, or whether the restriction
     only applies to those that are.  (A question on this in comp.std.c
     in November 2000 received no answer.)  We implement the strictest
     interpretation, to avoid creating an extension which later causes
     problems.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      /* Anonymous bindings cannot be user declarations; skip them.  */
      if (!id)
	continue;

      switch (TREE_CODE (decl))
	{
	case VAR_DECL:
	  {
	    location_t decl_loc = DECL_SOURCE_LOCATION (decl);
	    if (TREE_STATIC (decl))
	      error_at (decl_loc,
			"declaration of static variable %qD in %<for%> loop "
			"initial declaration", decl);
	    else if (DECL_EXTERNAL (decl))
	      error_at (decl_loc,
			"declaration of %<extern%> variable %qD in %<for%> loop "
			"initial declaration", decl);
	  }
	  break;

	case RECORD_TYPE:
	  error_at (loc,
		    "%<struct %E%> declared in %<for%> loop initial "
		    "declaration", id);
	  break;
	case UNION_TYPE:
	  error_at (loc,
		    "%<union %E%> declared in %<for%> loop initial declaration",
		    id);
	  break;
	case ENUMERAL_TYPE:
	  error_at (loc, "%<enum %E%> declared in %<for%> loop "
		    "initial declaration", id);
	  break;
	default:
	  error_at (loc, "declaration of non-variable "
		    "%qD in %<for%> loop initial declaration", decl);
	}

      n_decls++;
      one_decl = decl;
    }

  /* Return the declaration only when it is unique.  */
  return n_decls == 1 ? one_decl : NULL_TREE;
}
/* Save and reinitialize the variables
used during compilation of a C function. */
void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  /* Save the statement tree first; the global list is then replaced by
     a copy so the outer function's list is not mutated while nested.  */
  p->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  /* Save the remaining per-function front-end state.  */
  p->x_in_statement = in_statement;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}
/* Restore the variables used during compilation of a C function. */
void
c_pop_function_context (void)
{
  struct language_function *p;

  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->languages is
     used to store data throughout the life time of the current cfun,
     So don't deallocate it.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.  */
      /* But DECL_INITIAL must remain nonzero so we know this
	 was an actual function definition.  */
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
    }

  /* Restore the saved front-end state, mirroring c_push_function_context.
     Clear the saved statement list so the GC does not keep it alive.  */
  c_stmt_tree = p->base.x_stmt_tree;
  p->base.x_stmt_tree.x_cur_stmt_list = NULL;
  in_statement = p->x_in_statement;
  c_switch_stack = p->x_switch_stack;
  current_function_arg_info = p->arg_info;
  current_function_returns_value = p->returns_value;
  current_function_returns_null = p->returns_null;
  current_function_returns_abnormally = p->returns_abnormally;
  warn_about_return_type = p->warn_about_return_type;
}
/* The functions below are required for functionality of doing
function at once processing in the C front end. Currently these
functions are not called from anywhere in the C front end, but as
these changes continue, that will change. */
/* Returns the stmt_tree (if any) to which statements are currently
being added. If there is no active statement-tree, NULL is
returned. */
/* Returns the stmt_tree to which statements are currently being added;
   for the C front end this is always the global c_stmt_tree.  */

stmt_tree
current_stmt_tree (void)
{
  return &c_stmt_tree;
}
/* Return the global value of T as a symbol. */
/* Return the global (file-scope or external-scope) value of identifier T
   as a symbol, or NULL_TREE if T has no such binding.  */

tree
identifier_global_value (tree t)
{
  struct c_binding *b = I_SYMBOL_BINDING (t);

  /* Walk outward through shadowed bindings until a global one shows up.  */
  while (b != NULL)
    {
      if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
	return b->decl;
      b = b->shadowed;
    }

  return NULL_TREE;
}
/* Return the global value of tag T as a symbol. */
/* Return the global (file-scope or external-scope) value of tag T
   as a symbol, or NULL_TREE if T has no such tag binding.  */

tree
identifier_global_tag (tree t)
{
  struct c_binding *b = I_TAG_BINDING (t);

  /* Walk outward through shadowed tag bindings looking for a global one.  */
  while (b != NULL)
    {
      if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
	return b->decl;
      b = b->shadowed;
    }

  return NULL_TREE;
}
/* Returns true if NAME refers to a built-in function or function-like
operator. */
bool
names_builtin_p (const char *name)
{
tree id = get_identifier (name);
if (tree decl = identifier_global_value (id))
return TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl);
/* Also detect common reserved C words that aren't strictly built-in
functions. */
switch (C_RID_CODE (id))
{
case RID_BUILTIN_CONVERTVECTOR:
case RID_BUILTIN_HAS_ATTRIBUTE:
case RID_BUILTIN_SHUFFLE:
case RID_CHOOSE_EXPR:
case RID_OFFSETOF:
case RID_TYPES_COMPATIBLE_P:
return true;
default:
break;
}
return false;
}
/* In C, the only C-linkage public declaration is at file scope. */
/* In C, the only C-linkage public declaration is at file scope, so simply
   forward to identifier_global_value.  */

tree
c_linkage_bindings (tree name)
{
  return identifier_global_value (name);
}
/* Record a builtin type for C. If NAME is non-NULL, it is the name used;
otherwise the name is found in ridpointers from RID_INDEX. */
void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
tree id, decl;
if (name == 0)
id = ridpointers[(int) rid_index];
else
id = get_identifier (name);
decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
pushdecl (decl);
if (debug_hooks->type_decl)
debug_hooks->type_decl (decl, false);
}
/* Build the void_list_node (void_type_node having been created). */
/* Build the void_list_node (void_type_node having been created):
   a one-element TREE_LIST holding void_type_node.  */

tree
build_void_list_node (void)
{
  return build_tree_list (NULL_TREE, void_type_node);
}
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR,
   recorded at location LOC.  Allocated on the parser obstack.  */

struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
	      struct c_declarator *declarator,
	      location_t loc)
{
  struct c_parm *parm = XOBNEW (&parser_obstack, struct c_parm);
  parm->loc = loc;
  parm->declarator = declarator;
  parm->attrs = attrs;
  parm->specs = specs;
  return parm;
}
/* Return a declarator with nested attributes. TARGET is the inner
declarator to which these attributes apply. ATTRS are the
attributes. */
/* Return a declarator with nested attributes.  TARGET is the inner
   declarator to which these attributes apply.  ATTRS are the
   attributes.  */

struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);
  d->u.attrs = attrs;
  d->declarator = target;
  d->kind = cdk_attrs;
  return d;
}
/* Return a declarator for a function with arguments specified by ARGS
and return type specified by TARGET. */
/* Return a declarator for a function with arguments specified by ARGS
   and return type specified by TARGET.  */

struct c_declarator *
build_function_declarator (struct c_arg_info *args,
			   struct c_declarator *target)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);
  d->u.arg_info = args;
  d->declarator = target;
  d->kind = cdk_function;
  return d;
}
/* Return a declarator for the identifier IDENT (which may be
NULL_TREE for an abstract declarator). */
/* Return a declarator for the identifier IDENT (which may be
   NULL_TREE for an abstract declarator).  */

struct c_declarator *
build_id_declarator (tree ident)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);
  d->kind = cdk_id;
  d->declarator = 0;
  d->u.id.attrs = NULL_TREE;
  d->u.id.id = ident;
  /* Default value - may get reset to a more precise location.  */
  d->id_loc = input_location;
  return d;
}
/* Return something to represent absolute declarators containing a *.
TARGET is the absolute declarator that the * contains.
TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes
to apply to the pointer type. */
struct c_declarator *
make_pointer_declarator (struct c_declspecs *type_quals_attrs,
struct c_declarator *target)
{
tree attrs;
int quals = 0;
struct c_declarator *itarget = target;
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
if (type_quals_attrs)
{
attrs = type_quals_attrs->attrs;
quals = quals_from_declspecs (type_quals_attrs);
if (attrs != NULL_TREE)
itarget = build_attrs_declarator (attrs, target);
}
ret->kind = cdk_pointer;
ret->declarator = itarget;
ret->u.pointer_quals = quals;
return ret;
}
/* Return a pointer to a structure for an empty list of declaration
specifiers. */
/* Return a pointer to a structure for an empty list of declaration
   specifiers, with every field set to its "nothing seen yet" value.  */

struct c_declspecs *
build_null_declspecs (void)
{
  struct c_declspecs *specs = XOBNEW (&parser_obstack, struct c_declspecs);

  /* Start from all-zero, then set the fields whose neutral value
     is nonzero.  */
  memset (specs, 0, sizeof *specs);
  specs->typespec_word = cts_none;
  specs->typespec_kind = ctsk_none;
  specs->storage_class = csc_none;
  specs->address_space = ADDR_SPACE_GENERIC;
  specs->expr_const_operands = true;
  specs->align_log = -1;
  return specs;
}
/* Add the address space ADDRSPACE to the declaration specifiers
SPECS, returning SPECS. */
/* Add the address space AS to the declaration specifiers SPECS,
   returning SPECS.  LOCATION is where the qualifier appeared.  */

struct c_declspecs *
declspecs_add_addrspace (location_t location,
			 struct c_declspecs *specs, addr_space_t as)
{
  /* An address-space qualifier counts as a non-storage-class specifier.  */
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;

  if (ADDR_SPACE_GENERIC_P (specs->address_space)
      || specs->address_space == as)
    {
      /* First address space seen, or a repeat of the same one: record it.  */
      specs->address_space = as;
      specs->locations[cdw_address_space] = location;
    }
  else
    error ("incompatible address space qualifiers %qs and %qs",
	   c_addr_space_name (as),
	   c_addr_space_name (specs->address_space));

  return specs;
}
/* Add the type qualifier QUAL to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_qual (location_t loc,
		    struct c_declspecs *specs, tree qual)
{
  enum rid i;
  bool dupe = false;

  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;
  gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (qual));
  i = C_RID_CODE (qual);
  location_t prev_loc = UNKNOWN_LOCATION;
  /* For each qualifier: note whether it was already present (DUPE),
     remember where it previously appeared (PREV_LOC), then record
     the new occurrence and its location.  */
  switch (i)
    {
    case RID_CONST:
      dupe = specs->const_p;
      specs->const_p = true;
      prev_loc = specs->locations[cdw_const];
      specs->locations[cdw_const] = loc;
      break;
    case RID_VOLATILE:
      dupe = specs->volatile_p;
      specs->volatile_p = true;
      prev_loc = specs->locations[cdw_volatile];
      specs->locations[cdw_volatile] = loc;
      break;
    case RID_RESTRICT:
      dupe = specs->restrict_p;
      specs->restrict_p = true;
      prev_loc = specs->locations[cdw_restrict];
      specs->locations[cdw_restrict] = loc;
      break;
    case RID_ATOMIC:
      dupe = specs->atomic_p;
      specs->atomic_p = true;
      prev_loc = specs->locations[cdw_atomic];
      specs->locations[cdw_atomic] = loc;
      break;
    default:
      gcc_unreachable ();
    }
  if (dupe)
    {
      /* A duplicated qualifier: pedwarn for C90, and if that did not
	 fire, optionally warn (but not when either occurrence came
	 from a macro expansion, to avoid noise from macros that
	 expand to qualified types).  */
      bool warned = pedwarn_c90 (loc, OPT_Wpedantic,
				 "duplicate %qE declaration specifier", qual);
      if (!warned
	  && warn_duplicate_decl_specifier
	  && prev_loc >= RESERVED_LOCATION_COUNT
	  && !from_macro_expansion_at (prev_loc)
	  && !from_macro_expansion_at (loc))
	warning_at (loc, OPT_Wduplicate_decl_specifier,
		    "duplicate %qE declaration specifier", qual);
    }
  return specs;
}
/* Add the type specifier TYPE to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
struct c_typespec spec)
{
tree type = spec.spec;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
specs->non_std_attrs_seen_p = true;
specs->typespec_kind = spec.kind;
if (TREE_DEPRECATED (type))
specs->deprecated_p = true;
/* Handle type specifier keywords. */
/* TYPE is either a reserved-word identifier (a modifier such as
"long"/"_Sat" or a basic type keyword), a TYPE_DECL, a plain
identifier, a TYPE node, or ERROR_MARK; identifiers carrying
RID_CXX_COMPAT_WARN are deliberately excluded here and fall
through to the generic handling at the end. */
if (TREE_CODE (type) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (type)
&& C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
{
enum rid i = C_RID_CODE (type);
if (specs->type)
{
error_at (loc, "two or more data types in declaration specifiers");
return specs;
}
if ((int) i <= (int) RID_LAST_MODIFIER)
{
/* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */
bool dupe = false;
/* Each modifier case diagnoses any conflicting specifier already
recorded in SPECS; only when no conflict exists is the flag set
and its location stored. */
switch (i)
{
case RID_LONG:
if (specs->long_long_p)
{
error_at (loc, "%<long long long%> is too long for GCC");
break;
}
if (specs->long_p)
{
if (specs->typespec_word == cts_double)
{
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
break;
}
pedwarn_c90 (loc, OPT_Wlong_long,
"ISO C90 does not support %<long long%>");
specs->long_long_p = 1;
specs->locations[cdw_long_long] = loc;
break;
}
if (specs->short_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<long%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<long%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<long%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<long%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->long_p = true;
specs->locations[cdw_long] = loc;
}
break;
case RID_SHORT:
dupe = specs->short_p;
if (specs->long_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<short%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<short%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<short%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<short%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->short_p = true;
specs->locations[cdw_short] = loc;
}
break;
case RID_SIGNED:
dupe = specs->signed_p;
if (specs->unsigned_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<signed%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<signed%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<signed%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->signed_p = true;
specs->locations[cdw_signed] = loc;
}
break;
case RID_UNSIGNED:
dupe = specs->unsigned_p;
if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<unsigned%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<unsigned%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<unsigned%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->unsigned_p = true;
specs->locations[cdw_unsigned] = loc;
}
break;
case RID_COMPLEX:
dupe = specs->complex_p;
/* No C90 pedwarn inside system headers, which may use
_Complex freely. */
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<complex%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<complex%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<complex%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_fract)
error_at (loc,
("both %<complex%> and %<_Fract%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_accum)
error_at (loc,
("both %<complex%> and %<_Accum%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<complex%> and %<_Sat%> in "
"declaration specifiers"));
else
{
specs->complex_p = true;
specs->locations[cdw_complex] = loc;
}
break;
case RID_SAT:
dupe = specs->saturating_p;
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support saturating types");
if (specs->typespec_word == cts_int_n)
{
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
}
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<_Sat%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<_Sat%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<_Sat%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<_Sat%> and %<complex%> in "
"declaration specifiers"));
else
{
specs->saturating_p = true;
specs->locations[cdw_saturating] = loc;
}
break;
default:
gcc_unreachable ();
}
/* A modifier seen twice (e.g. "short short").  Note RID_LONG never
sets DUPE; a second "long" is handled via long_long_p above. */
if (dupe)
error_at (loc, "duplicate %qE", type);
return specs;
}
else
{
/* "void", "_Bool", "char", "int", "float", "double",
"_FloatN", "_FloatNx", "_Decimal32", "__intN",
"_Decimal64", "_Decimal128", "_Fract", "_Accum" or
"__auto_type". */
if (specs->typespec_word != cts_none)
{
error_at (loc,
"two or more data types in declaration specifiers");
return specs;
}
switch (i)
{
case RID_AUTO_TYPE:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_auto_type;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT_N_0:
case RID_INT_N_1:
case RID_INT_N_2:
case RID_INT_N_3:
specs->int_n_idx = i - RID_INT_N_0;
if (!in_system_header_at (input_location)
/* If the INT_N type ends in "__", and so is of the format
"__intN__", don't pedwarn. */
&& (strncmp (IDENTIFIER_POINTER (type)
+ (IDENTIFIER_LENGTH (type) - 2), "__", 2) != 0))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support %<__int%d%> types",
int_n_data[specs->int_n_idx].bitsize);
if (specs->long_p)
error_at (loc,
("both %<__int%d%> and %<long%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->short_p)
error_at (loc,
("both %<__int%d%> and %<short%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (! int_n_enabled_p[specs->int_n_idx])
{
/* The typespec word is still recorded so later code sees a
definite kind even though the type is unsupported here. */
specs->typespec_word = cts_int_n;
error_at (loc,
"%<__int%d%> is not supported on this target",
int_n_data[specs->int_n_idx].bitsize);
}
else
{
specs->typespec_word = cts_int_n;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_VOID:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_void;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_BOOL:
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support boolean types");
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_bool;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_CHAR:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_char;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT:
if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_int;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_FLOAT:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_float;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DOUBLE:
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_double;
specs->locations[cdw_typespec] = loc;
}
return specs;
CASE_RID_FLOATN_NX:
specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support the %<_Float%d%s%> type",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
{
specs->typespec_word = cts_floatn_nx;
error_at (loc,
"%<_Float%d%s%> is not supported on this target",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
}
else
{
specs->typespec_word = cts_floatn_nx;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
{
/* STR names the decimal type in the conflict diagnostics. */
const char *str;
if (i == RID_DFLOAT32)
str = "_Decimal32";
else if (i == RID_DFLOAT64)
str = "_Decimal64";
else
str = "_Decimal128";
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %qs in "
"declaration specifiers"),
str);
if (specs->long_p)
error_at (loc,
("both %<long%> and %qs in "
"declaration specifiers"),
str);
else if (specs->short_p)
error_at (loc,
("both %<short%> and %qs in "
"declaration specifiers"),
str);
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %qs in "
"declaration specifiers"),
str);
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %qs in "
"declaration specifiers"),
str);
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_DFLOAT32)
specs->typespec_word = cts_dfloat32;
else if (i == RID_DFLOAT64)
specs->typespec_word = cts_dfloat64;
else
specs->typespec_word = cts_dfloat128;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.decimal_float_supported_p ())
error_at (loc,
("decimal floating-point not supported "
"for this target"));
pedwarn_c11 (loc, OPT_Wpedantic,
"ISO C does not support decimal floating-point "
"before C2X");
return specs;
case RID_FRACT:
case RID_ACCUM:
{
const char *str;
if (i == RID_FRACT)
str = "_Fract";
else
str = "_Accum";
if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_FRACT)
specs->typespec_word = cts_fract;
else
specs->typespec_word = cts_accum;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.fixed_point_supported_p ())
error_at (loc,
"fixed-point types not supported for this target");
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support fixed-point types");
return specs;
default:
/* ObjC reserved word "id", handled below. */
break;
}
}
}
/* Now we have a typedef (a TYPE_DECL node), an identifier (some
form of ObjC type, cases such as "int" and "long" being handled
above), a TYPE (struct, union, enum and typeof specifiers) or an
ERROR_MARK. In none of these cases may there have previously
been any type specifiers. */
if (specs->type || specs->typespec_word != cts_none
|| specs->long_p || specs->short_p || specs->signed_p
|| specs->unsigned_p || specs->complex_p)
error_at (loc, "two or more data types in declaration specifiers");
else if (TREE_CODE (type) == TYPE_DECL)
{
if (TREE_TYPE (type) == error_mark_node)
; /* Allow the type to default to int to avoid cascading errors. */
else
{
specs->type = TREE_TYPE (type);
specs->decl_attr = DECL_ATTRIBUTES (type);
specs->typedef_p = true;
specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
specs->locations[cdw_typedef] = loc;
/* If this typedef name is defined in a struct, then a C++
lookup would return a different value. */
if (warn_cxx_compat
&& I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
warning_at (loc, OPT_Wc___compat,
"C++ lookup of %qD would return a field, not a type",
type);
/* If we are parsing a struct, record that a struct field
used a typedef. */
if (warn_cxx_compat && struct_parse_info != NULL)
struct_parse_info->typedefs_seen.safe_push (type);
}
}
else if (TREE_CODE (type) == IDENTIFIER_NODE)
{
/* An identifier not handled above (e.g. an ObjC type name):
it must resolve to a TYPE_DECL to be usable. */
tree t = lookup_name (type);
if (!t || TREE_CODE (t) != TYPE_DECL)
error_at (loc, "%qE fails to be a typedef or built in type", type);
else if (TREE_TYPE (t) == error_mark_node)
;
else
{
specs->type = TREE_TYPE (t);
specs->locations[cdw_typespec] = loc;
}
}
else
{
/* A TYPE node (struct/union/enum/typeof specifier) or ERROR_MARK. */
if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
{
specs->typedef_p = true;
specs->locations[cdw_typedef] = loc;
if (spec.expr)
{
if (specs->expr)
specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
specs->expr, spec.expr);
else
specs->expr = spec.expr;
specs->expr_const_operands &= spec.expr_const_operands;
}
}
specs->type = type;
}
return specs;
}
/* Add the storage class specifier or function specifier SCSPEC to the
declaration specifiers SPECS, returning SPECS. */
struct c_declspecs *
declspecs_add_scspec (location_t loc,
                      struct c_declspecs *specs,
                      tree scspec)
{
  enum rid code;
  enum c_storage_class new_sc = csc_none;
  bool duplicated = false;

  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;
  gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
              && C_IS_RESERVED_WORD (scspec));
  code = C_RID_CODE (scspec);
  /* Storage classes are expected to lead the declaration.  */
  if (specs->non_sc_seen_p)
    warning (OPT_Wold_style_declaration,
             "%qE is not at beginning of declaration", scspec);
  switch (code)
    {
    case RID_INLINE:
      /* C99 permits duplicate inline.  Although of doubtful utility,
         it seems simplest to permit it in gnu89 mode as well, as
         there is also little utility in maintaining this as a
         difference between gnu89 and C99 inline.  */
      specs->inline_p = true;
      specs->locations[cdw_inline] = loc;
      break;
    case RID_NORETURN:
      /* Duplicate _Noreturn is permitted.  */
      specs->noreturn_p = true;
      specs->locations[cdw_noreturn] = loc;
      break;
    case RID_THREAD:
      duplicated = specs->thread_p;
      if (specs->storage_class == csc_auto)
        error ("%qE used with %<auto%>", scspec);
      else if (specs->storage_class == csc_register)
        error ("%qE used with %<register%>", scspec);
      else if (specs->storage_class == csc_typedef)
        error ("%qE used with %<typedef%>", scspec);
      else
        {
          specs->thread_p = true;
          specs->thread_gnu_p
            = strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0;
          /* A diagnostic is not required for the use of this
             identifier in the implementation namespace; only diagnose
             it for the C11 spelling because of existing code using
             the other spelling.  */
          if (!specs->thread_gnu_p)
            {
              if (flag_isoc99)
                pedwarn_c99 (loc, OPT_Wpedantic,
                             "ISO C99 does not support %qE", scspec);
              else
                pedwarn_c99 (loc, OPT_Wpedantic,
                             "ISO C90 does not support %qE", scspec);
            }
          specs->locations[cdw_thread] = loc;
        }
      break;
    case RID_AUTO:
      new_sc = csc_auto;
      break;
    case RID_EXTERN:
      new_sc = csc_extern;
      /* Diagnose "__thread extern".  */
      if (specs->thread_p && specs->thread_gnu_p)
        error ("%<__thread%> before %<extern%>");
      break;
    case RID_REGISTER:
      new_sc = csc_register;
      break;
    case RID_STATIC:
      new_sc = csc_static;
      /* Diagnose "__thread static".  */
      if (specs->thread_p && specs->thread_gnu_p)
        error ("%<__thread%> before %<static%>");
      break;
    case RID_TYPEDEF:
      new_sc = csc_typedef;
      break;
    default:
      gcc_unreachable ();
    }

  /* The same storage class twice is a duplicate as well.  */
  if (new_sc != csc_none && new_sc == specs->storage_class)
    duplicated = true;
  if (duplicated)
    {
      if (code == RID_THREAD)
        error ("duplicate %<_Thread_local%> or %<__thread%>");
      else
        error ("duplicate %qE", scspec);
    }
  if (new_sc != csc_none)
    {
      if (specs->storage_class != csc_none && new_sc != specs->storage_class)
        error ("multiple storage classes in declaration specifiers");
      else
        {
          specs->storage_class = new_sc;
          specs->locations[cdw_storage_class] = loc;
          /* Thread storage is only meaningful with extern/static.  */
          if (new_sc != csc_extern && new_sc != csc_static && specs->thread_p)
            {
              error ("%qs used with %qE",
                     specs->thread_gnu_p ? "__thread" : "_Thread_local",
                     scspec);
              specs->thread_p = false;
            }
        }
    }
  return specs;
}
/* Add the attributes ATTRS to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_attrs (location_t loc, struct c_declspecs *specs, tree attrs)
{
  /* Record that specifiers have been seen, and that they include
     something other than standard attributes.  In the case of
     standard attributes at the start of the declaration, the caller
     will reset the latter flag.  */
  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;
  specs->locations[cdw_attributes] = loc;
  /* New attributes go in front of any already collected.  */
  specs->attrs = chainon (attrs, specs->attrs);
  return specs;
}
/* Add an _Alignas specifier (expression ALIGN, or type whose
alignment is ALIGN) to the declaration specifiers SPECS, returning
SPECS. */
struct c_declspecs *
declspecs_add_alignas (location_t loc,
                       struct c_declspecs *specs, tree align)
{
  specs->alignas_p = true;
  specs->locations[cdw_alignas] = loc;
  if (align == error_mark_node)
    return specs;

  /* Only accept the alignment if it's valid and greater than
     the current one.  Zero is invalid but by C11 required to
     be silently ignored, hence warn_zero = false.  */
  int log2align = check_user_alignment (align, false, /* warn_zero = */false);
  if (log2align > specs->align_log)
    specs->align_log = log2align;
  return specs;
}
/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
specifiers with any other type specifier to determine the resulting
type. This is where ISO C checks on complex types are made, since
"_Complex long" is a prefix of the valid ISO C type "_Complex long
double". Also apply postfix standard attributes to modify the type. */
struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
/* If a type was specified as a whole, we have no modifiers and are
done. */
if (specs->type != NULL_TREE)
{
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Set a dummy type. */
if (TREE_CODE (specs->type) == ERROR_MARK)
specs->type = integer_type_node;
goto handle_postfix_attrs;
}
/* If none of "void", "_Bool", "char", "int", "float" or "double"
has been specified, treat it as "int" unless "_Complex" is
present and there are no other specifiers. If we just have
"_Complex", it is equivalent to "_Complex double", but e.g.
"_Complex short" is equivalent to "_Complex short int". */
if (specs->typespec_word == cts_none)
{
if (specs->saturating_p)
{
error_at (specs->locations[cdw_saturating],
"%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
if (!targetm.fixed_point_supported_p ())
error_at (specs->locations[cdw_saturating],
"fixed-point types not supported for this target");
specs->typespec_word = cts_fract;
}
else if (specs->long_p || specs->short_p
|| specs->signed_p || specs->unsigned_p)
{
specs->typespec_word = cts_int;
}
else if (specs->complex_p)
{
specs->typespec_word = cts_double;
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support plain %<complex%> meaning "
"%<double complex%>");
}
else
{
specs->typespec_word = cts_int;
specs->default_int_p = true;
/* We don't diagnose this here because grokdeclarator will
give more specific diagnostics according to whether it is
a function definition. */
}
}
/* If "signed" was specified, record this to distinguish "int" and
"signed int" in the case of a bit-field with
-funsigned-bitfields. */
specs->explicit_signed_p = specs->signed_p;
/* Now compute the actual type.  Each case asserts the modifier
combinations that should already have been rejected when the
specifiers were added. */
switch (specs->typespec_word)
{
case cts_auto_type:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Type to be filled in later. */
if (specs->postfix_attrs)
error ("%<__auto_type%> followed by %<[[]]%> attributes");
break;
case cts_void:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = void_type_node;
break;
case cts_bool:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = boolean_type_node;
break;
case cts_char:
gcc_assert (!specs->long_p && !specs->short_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->signed_p)
specs->type = signed_char_type_node;
else if (specs->unsigned_p)
specs->type = unsigned_char_type_node;
else
specs->type = char_type_node;
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int_n:
gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
/* Fall back to int when __intN is not enabled for this target;
a diagnostic was already given when the specifier was added. */
if (! int_n_enabled_p[specs->int_n_idx])
specs->type = integer_type_node;
else
specs->type = (specs->unsigned_p
? int_n_trees[specs->int_n_idx].unsigned_type
: int_n_trees[specs->int_n_idx].signed_type);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int:
gcc_assert (!(specs->long_p && specs->short_p));
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->long_long_p)
specs->type = (specs->unsigned_p
? long_long_unsigned_type_node
: long_long_integer_type_node);
else if (specs->long_p)
specs->type = (specs->unsigned_p
? long_unsigned_type_node
: long_integer_type_node);
else if (specs->short_p)
specs->type = (specs->unsigned_p
? short_unsigned_type_node
: short_integer_type_node);
else
specs->type = (specs->unsigned_p
? unsigned_type_node
: integer_type_node);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_float:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
specs->type = (specs->complex_p
? complex_float_type_node
: float_type_node);
break;
case cts_double:
gcc_assert (!specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
if (specs->long_p)
{
specs->type = (specs->complex_p
? complex_long_double_type_node
: long_double_type_node);
}
else
{
specs->type = (specs->complex_p
? complex_double_type_node
: double_type_node);
}
break;
case cts_floatn_nx:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
/* Fall back to int when _FloatN/_FloatNx is unsupported here. */
if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
specs->type = integer_type_node;
else if (specs->complex_p)
specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
else
specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
break;
case cts_dfloat32:
case cts_dfloat64:
case cts_dfloat128:
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p && !specs->complex_p);
/* Fall back to int when decimal FP is unsupported here. */
if (!targetm.decimal_float_supported_p ())
specs->type = integer_type_node;
else if (specs->typespec_word == cts_dfloat32)
specs->type = dfloat32_type_node;
else if (specs->typespec_word == cts_dfloat64)
specs->type = dfloat64_type_node;
else
specs->type = dfloat128_type_node;
break;
case cts_fract:
gcc_assert (!specs->complex_p);
/* Fixed-point: pick the node matching the saturation, length and
signedness modifiers; fall back to int when unsupported. */
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_fract_type_node
: sat_long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_fract_type_node
: sat_long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_fract_type_node
: sat_short_fract_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_fract_type_node
: sat_fract_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_fract_type_node
: long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_fract_type_node
: long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_fract_type_node
: short_fract_type_node;
else
specs->type = specs->unsigned_p
? unsigned_fract_type_node
: fract_type_node;
}
break;
case cts_accum:
gcc_assert (!specs->complex_p);
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_accum_type_node
: sat_long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_accum_type_node
: sat_long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_accum_type_node
: sat_short_accum_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_accum_type_node
: sat_accum_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_accum_type_node
: long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_accum_type_node
: long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_accum_type_node
: short_accum_type_node;
else
specs->type = specs->unsigned_p
? unsigned_accum_type_node
: accum_type_node;
}
break;
default:
gcc_unreachable ();
}
handle_postfix_attrs:
/* Apply any postfix standard attributes to the computed type. */
if (specs->type != NULL)
{
specs->postfix_attrs = c_warn_type_attributes (specs->postfix_attrs);
decl_attributes (&specs->type, specs->postfix_attrs, 0);
specs->postfix_attrs = NULL_TREE;
}
return specs;
}
/* Perform final processing on one file scope's declarations (or the
external scope's declarations), GLOBALS. */
static void
c_write_global_declarations_1 (tree globals)
{
  /* First pass, in declaration order: diagnose file-scope static
     functions that lack a body, then run phase-1 wrapup on each decl.  */
  for (tree d = globals; d; d = DECL_CHAIN (d))
    {
      /* A static function with no DECL_INITIAL is declared but never
	 defined in this translation unit.  */
      bool undefined_static = (TREE_CODE (d) == FUNCTION_DECL
			       && DECL_INITIAL (d) == NULL_TREE
			       && DECL_EXTERNAL (d)
			       && !TREE_PUBLIC (d));
      if (undefined_static)
	{
	  if (C_DECL_USED (d))
	    {
	      /* Used but undefined, per the C standard's notion of
		 "used".  Set TREE_NO_WARNING so that
		 check_global_declaration doesn't repeat the check.  */
	      if (pedwarn (input_location, 0, "%q+F used but never defined",
			   d))
		TREE_NO_WARNING (d) = 1;
	    }
	  else if (warn_unused_function
		   && !DECL_ARTIFICIAL (d)
		   && !TREE_NO_WARNING (d))
	    {
	      /* For -Wunused-function warn about unused static
		 prototypes.  */
	      if (warning (OPT_Wunused_function,
			   "%q+F declared %<static%> but never defined",
			   d))
		TREE_NO_WARNING (d) = 1;
	    }
	}
      wrapup_global_declaration_1 (d);
    }

  /* Second pass: repeat phase-2 wrapup until no decl changes state.  */
  bool again = true;
  while (again)
    {
      again = false;
      for (tree d = globals; d; d = DECL_CHAIN (d))
	again |= wrapup_global_declaration_2 (d);
    }
}
/* Preserve the external declarations scope across a garbage collect. */
static GTY(()) tree ext_block;
/* Collect all references relevant to SOURCE_FILE. */
static void
collect_all_refs (const char *source_file)
{
  unsigned ix;
  tree tu;

  /* Walk the file-scope decls of every translation unit, then the
     decls preserved in the external scope.  */
  FOR_EACH_VEC_ELT (*all_translation_units, ix, tu)
    collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (tu)), source_file);

  collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}
/* Collect source file references at global level. */
static void
collect_source_refs (void)
{
  unsigned ix;
  tree tu;

  /* Record the source file of every non-builtin decl at file scope,
     in each translation unit and in the external scope.  */
  FOR_EACH_VEC_ELT (*all_translation_units, ix, tu)
    {
      for (tree d = BLOCK_VARS (DECL_INITIAL (tu)); d; d = TREE_CHAIN (d))
	if (!DECL_IS_BUILTIN (d))
	  collect_source_ref (DECL_SOURCE_FILE (d));
    }

  for (tree d = BLOCK_VARS (ext_block); d; d = TREE_CHAIN (d))
    if (!DECL_IS_BUILTIN (d))
      collect_source_ref (DECL_SOURCE_FILE (d));
}
/* Perform any final parser cleanups and generate initial debugging
information. */
void
c_parse_final_cleanups (void)
{
  tree t;
  unsigned i;

  /* We don't want to do this if generating a PCH.  */
  if (pch_file)
    return;

  /* Charge the remaining work to the deferred phase, not parsing.  */
  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);

  /* Do the Objective-C stuff.  This is where all the Objective-C
     module stuff gets generated (symtab, class/protocol/selector
     lists etc).  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();

  /* Close the external scope.  The popped scope is kept in ext_block
     (a GC root, see above) so its decls stay reachable.  */
  ext_block = pop_scope ();
  external_scope = 0;
  gcc_assert (!current_scope);

  /* Handle -fdump-ada-spec[-slim]. */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      /* Build a table of files to generate specs for */
      collect_source_ref (main_input_filename);
      if (!flag_dump_ada_spec_slim)
	collect_source_refs ();
      dump_ada_specs (collect_all_refs, NULL);
    }

  /* Process all file scopes in this compilation, and the external_scope,
     through wrapup_global_declarations.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
  c_write_global_declarations_1 (BLOCK_VARS (ext_block));

  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);

  /* The external scope's decls are wrapped up; drop the GC root.  */
  ext_block = NULL;
}
/* Register reserved keyword WORD as qualifier for address space AS. */
void
c_register_addr_space (const char *word, addr_space_t as)
{
  /* Address space qualifiers are only supported
     in C with GNU extensions enabled.  */
  if (c_dialect_objc () || flag_no_asm)
    return;

  /* Reserve WORD as the keyword for address space AS.  */
  int rid = RID_FIRST_ADDR_SPACE + as;
  tree id = get_identifier (word);
  C_SET_RID_CODE (id, rid);
  C_IS_RESERVED_WORD (id) = 1;
  ridpointers[rid] = id;
}
/* Return identifier to look up for omp declare reduction. */
tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
  /* Map the built-in reduction operators to their spelling; anything
     else is a user-defined reduction named by REDUCTION_ID.  */
  const char *op_name;
  switch (reduction_code)
    {
    case PLUS_EXPR: op_name = "+"; break;
    case MULT_EXPR: op_name = "*"; break;
    case MINUS_EXPR: op_name = "-"; break;
    case BIT_AND_EXPR: op_name = "&"; break;
    case BIT_XOR_EXPR: op_name = "^"; break;
    case BIT_IOR_EXPR: op_name = "|"; break;
    case TRUTH_ANDIF_EXPR: op_name = "&&"; break;
    case TRUTH_ORIF_EXPR: op_name = "||"; break;
    case MIN_EXPR: op_name = "min"; break;
    case MAX_EXPR: op_name = "max"; break;
    default: op_name = NULL; break;
    }
  if (op_name == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      op_name = IDENTIFIER_POINTER (reduction_id);
    }

  /* Build "omp declare reduction <op>" on the stack; the space in the
     prefix makes the name unspellable in user code.  */
  const char prefix[] = "omp declare reduction ";
  size_t prefix_len = sizeof (prefix) - 1;
  size_t op_len = strlen (op_name);
  char *name = XALLOCAVEC (char, prefix_len + op_len + 1);
  memcpy (name, prefix, prefix_len);
  memcpy (name + prefix_len, op_name, op_len + 1);
  return get_identifier (name);
}
/* Lookup REDUCTION_ID in the current scope, or create an artificial
VAR_DECL, bind it into the current scope and return it. */
tree
c_omp_reduction_decl (tree reduction_id)
{
  /* Reuse an existing binding of REDUCTION_ID in the current scope.  */
  struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
  if (b != NULL && B_IN_CURRENT_SCOPE (b))
    return b->decl;

  /* Otherwise create an artificial placeholder VAR_DECL.  Its
     DECL_INITIAL is later used as a (type, value) chain — see
     c_omp_reduction_lookup.  NOTE(review): integer_type_node appears
     to be a dummy type for the placeholder, not the reduction type.  */
  tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL,
			  reduction_id, integer_type_node);
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_STATIC (decl) = 1;
  TREE_PUBLIC (decl) = 0;
  /* Bind it into the current scope so subsequent calls find it.  */
  bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION);
  return decl;
}
/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */
tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
  /* Search the binding for REDUCTION_ID and every binding it shadows,
     innermost first; within each binding, DECL_INITIAL holds a chain
     of (TREE_PURPOSE = type, TREE_VALUE = reduction) pairs.  */
  for (struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
       b; b = b->shadowed)
    {
      for (tree pair = DECL_INITIAL (b->decl); pair; pair = TREE_CHAIN (pair))
	if (comptypes (TREE_PURPOSE (pair), type))
	  return TREE_VALUE (pair);
    }
  return error_mark_node;
}
/* Helper function called via walk_tree, to diagnose invalid
#pragma omp declare reduction combiners or initializers. */
tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  /* DATA points at the two variables the expression may legally
     reference: omp_out/omp_in for a combiner, omp_priv/omp_orig for
     an initializer.  */
  tree *vars = (tree *) data;
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != vars[0]
      && *tp != vars[1])
    {
      location_t loc = DECL_SOURCE_LOCATION (vars[0]);
      /* Whether we are checking a combiner or an initializer is
	 recovered from the name of the first allowed variable.  */
      if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>",
		  *tp);
      /* Returning the offending node stops the tree walk.  */
      return *tp;
    }
  return NULL_TREE;
}
/* Return true iff DECL's name is bound in the current scope.  */
bool
c_check_in_current_scope (tree decl)
{
  struct c_binding *binding = I_SYMBOL_BINDING (DECL_NAME (decl));
  if (binding == NULL)
    return false;
  return B_IN_CURRENT_SCOPE (binding);
}
#include "gt-c-c-decl.h"
|
GB_msort_2.c | //------------------------------------------------------------------------------
// GB_msort_2: sort a 2-by-n list of integers, using A[0:1][ ] as the key
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A parallel mergesort of an array of 2-by-n integers. Each key
// consists of two integers.
#include "GB_msort_2.h"
//------------------------------------------------------------------------------
// GB_msort_2_binary_search: binary search for the pivot
//------------------------------------------------------------------------------
// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input. The return value is pleft, where
//
// X [p_start ... pleft-1] <= Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end. If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.
static int64_t GB_msort_2_binary_search    // return pleft
(
    const int64_t *restrict Y_0,   // Pivot is Y [pivot]
    const int64_t *restrict Y_1,
    const int64_t pivot,
    const int64_t *restrict X_0,   // search in X [p_start..p_end_-1]
    const int64_t *restrict X_1,
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot), comparing the 2-integer keys
        // lexicographically via GB_lt_2
        bool less = GB_lt_2 (X_0, X_1, pmiddle,
                             Y_0, Y_1, pivot) ;
        // branchless narrowing: keep the half that can contain the Pivot
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && GB_eq_2 (X_0, X_1, pleft,
                                               Y_0, Y_1, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (GB_lt_2 (X_0, X_1, pleft,
                     Y_0, Y_1, pivot))
        {
            // X [pleft] < Pivot: the split point is one past pleft
            pleft++ ;
        }
        else
        {
            // pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}
//------------------------------------------------------------------------------
// GB_msort_2_create_merge_tasks
//------------------------------------------------------------------------------
// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ]. The task tids created are t0 to
// t0+ntasks-1.
void GB_msort_2_create_merge_tasks
(
    // output:
    int64_t *restrict L_task,   // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len,    // L_len  [t0...t0+ntasks-1] computed
    int64_t *restrict R_task,   // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len,    // R_len  [t0...t0+ntasks-1] computed
    int64_t *restrict S_task,   // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,               // first task tid to create
    const int ntasks,           // # of tasks to create
    const int64_t pS_start,     // merge into S [pS_start...]
    const int64_t *restrict L_0,    // Left = L [pL_start...pL_end-1]
    const int64_t *restrict L_1,
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *restrict R_0,    // Right = R [pR_start...pR_end-1]
    const int64_t *restrict R_1,
    const int64_t pR_start,
    const int64_t pR_end
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft  = pL_end - pL_start ;        // size of Left array
    int64_t nright = pR_end - pR_start ;        // size of Right array
    int64_t total_work = nleft + nright ;       // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        // Split the larger input at its midpoint and binary-search the
        // smaller one for that pivot, so both partitions stay sorted.
        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = GB_msort_2_binary_search (
                        L_0, L_1, pleft,
                        R_0, R_1, pR_start, pR_end) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = GB_msort_2_binary_search (
                        R_0, R_1, pright,
                        L_0, L_1, pL_start, pL_end) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        int ntasks0 = (int) round ((double) ntasks *
            (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        // (clamp ntasks0 into the range 1 to ntasks-1)
        ntasks0 = GB_IMAX (ntasks0, 1) ;
        ntasks0 = GB_IMIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        GB_msort_2_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
            L_0, L_1, pL_start, pleft,
            R_0, R_1, pR_start, pright) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ;     // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ;  // 2nd set starts here in S
        GB_msort_2_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
            L_0, L_1, pleft, pL_end,
            R_0, R_1, pright, pR_end) ;
    }
}
//------------------------------------------------------------------------------
// GB_msort_2_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------
// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */
static void GB_msort_2_merge
(
    int64_t *restrict S_0,              // output of length nleft + nright
    int64_t *restrict S_1,
    const int64_t *restrict Left_0,     // left input of length nleft
    const int64_t *restrict Left_1,
    const int64_t nleft,
    const int64_t *restrict Right_0,    // right input of length nright
    const int64_t *restrict Right_1,
    const int64_t nright
)
{
    int64_t p = 0, pleft = 0, pright = 0 ;

    // interleave the two sorted inputs while neither is exhausted
    while (pleft < nleft && pright < nright)
    {
        if (GB_lt_2 (Left_0, Left_1, pleft,
                     Right_0, Right_1, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            S_1 [p] = Left_1 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            S_1 [p] = Right_1 [pright] ;
            pright++ ;
        }
        p++ ;
    }

    // one input is exhausted; bulk-copy the remainder of the other into S
    if (pleft < nleft)
    {
        int64_t nrem = nleft - pleft ;
        memcpy (S_0 + p, Left_0 + pleft, nrem * sizeof (int64_t)) ;
        memcpy (S_1 + p, Left_1 + pleft, nrem * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t nrem = nright - pright ;
        memcpy (S_0 + p, Right_0 + pright, nrem * sizeof (int64_t)) ;
        memcpy (S_1 + p, Right_1 + pright, nrem * sizeof (int64_t)) ;
    }
}
//------------------------------------------------------------------------------
// GB_msort_2: parallel mergesort
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GB_msort_2 // sort array A of size 2-by-n, using 2 keys (A [0:1][])
(
    int64_t *restrict A_0,      // size n array
    int64_t *restrict A_1,      // size n array
    const int64_t n,
    int nthreads                // # of threads to use
)
{

    //--------------------------------------------------------------------------
    // handle small problems with a single thread
    //--------------------------------------------------------------------------

    if (nthreads <= 1 || n <= GB_BASECASE)
    {
        // sequential quicksort
        GB_qsort_2 (A_0, A_1, n) ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // determine # of tasks
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to    4 threads:     4 levels, 16 qsort leaves
    //  5 to   16 threads:     6 levels, 64 qsort leaves
    // 17 to   64 threads:     8 levels, 256 qsort leaves
    // 65 to  256 threads:    10 levels, 1024 qsort leaves
    // 256 to 1024 threads:   12 levels, 4096 qsort leaves
    // ...
    int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks = 1 << k ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // one block: W_0 and W_1 (the scratch copy of A), the five per-task
    // arrays, and the ntasks+1 Slice boundaries
    int64_t *restrict W = NULL ; size_t W_size = 0 ;
    W = GB_MALLOC_WERK (2*n + 6*ntasks + 1, int64_t, &W_size) ;
    if (W == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    // carve the single allocation W into its parts
    int64_t *T = W ;
    int64_t *restrict W_0    = T ; T += n ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    // Slice [tid..tid+1] delimits leaf tid; each leaf is quicksorted
    // independently
    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        GB_qsort_2 (A_0 + leaf, A_1 + leaf, leafsize) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    // Each pass of this loop performs two merge levels (A into W, then W
    // back into A).  Since k is even, the final sorted result ends in A.
    int nt = 1 ;
    for ( ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_msort_2_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, Slice [tid],    Slice [tid+nt],
                A_0, A_1, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_msort_2_merge (
                W_0 + pS, W_1 + pS,
                A_0 + pL, A_1 + pL, nL,
                A_0 + pR, A_1 + pR, nR) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_msort_2_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, Slice [tid],    Slice [tid+nt],
                W_0, W_1, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_msort_2_merge (
                A_0 + pS, A_1 + pS,
                W_0 + pL, W_1 + pL, nL,
                W_0 + pR, W_1 + pR, nR) ;
        }
        nt = 2*nt ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WERK (&W, W_size) ;
    return (GrB_SUCCESS) ;
}
|
pca_minibatch_kmeans.c | #include "kmeans.h"
#include "kmeans_utils.h"
#include "minibatch_commons.h"
#include "../../utils/matrix/csr_matrix/csr_to_vector_list.h"
#include "../../utils/matrix/vector_list/vector_list_math.h"
#include "../../utils/matrix/csr_matrix/csr_math.h"
#include "../../utils/vector/common/common_vector_math.h"
#include "../../utils/vector/sparse/sparse_vector_math.h"
#include "../../utils/fcl_logging.h"
#include <unistd.h>
#include <float.h>
#include <math.h>
/* PCA-accelerated minibatch k-means.
 *
 * Clusters SAMPLES using minibatch k-means.  When prms->ext_vects is
 * non-NULL (assumed to hold PCA eigenvectors — confirm against caller),
 * candidate cluster distances are first screened by a cheap length-based
 * lower bound and then by the distance in the low-dimensional PCA
 * projection, skipping most full euclidean distance evaluations.
 *
 * Returns a newly allocated kmeans_result owned by the caller.
 *
 * BUGFIX vs. the previous revision: the per-iteration statistics counters
 * (done_pca_calcs, saved_calculations_pca, saved_calculations_cauchy) and
 * the shared ctx.done_calculations / ctx.total_no_calcs fields were updated
 * from inside `omp parallel for` loops without any synchronization — a data
 * race.  They are now combined with reduction(+:...) / #pragma omp atomic.
 */
struct kmeans_result* pca_minibatch_kmeans(struct csr_matrix* samples
                                          , struct kmeans_params *prms) {
    uint32_t i;
    uint64_t j;
    uint64_t samples_per_batch;
    uint32_t max_not_improved_counter;
    uint32_t disable_optimizations;
    uint32_t* chosen_sample_map;
    struct sparse_vector* pca_projection_samples;  /* projection matrix of samples */
    struct sparse_vector* pca_projection_clusters; /* projection matrix of clusters */
    struct convergence_context conv_ctx;
    VALUE_TYPE* vector_lengths_pca_samples;
    VALUE_TYPE* vector_lengths_pca_clusters;
    struct kmeans_result* res;
    struct general_kmeans_context ctx;

    pca_projection_clusters = NULL;
    pca_projection_samples = NULL;
    /* initialize to NULL so the cleanup path below is always safe */
    vector_lengths_pca_samples = NULL;
    vector_lengths_pca_clusters = NULL;

    initialize_general_context(prms, &ctx, samples);

    conv_ctx.initialized = 0;
    max_not_improved_counter = 20;

    /* if clusters_raw was filled (this happens in kmeans++) free it
     * since minibatch k-means uses a different strategy to fill the raw clusters
     */
    free_cluster_hashmaps(ctx.clusters_raw, ctx.no_clusters);

    /* reset cluster counts since minibatch kmeans handles them differently */
    for (i = 0; i < ctx.no_clusters; i++) ctx.cluster_counts[i] = 0;

    chosen_sample_map = NULL;
    /* batch size: default is 5% of the corpus, overridable through
     * additional_params / samples_per_batch */
    samples_per_batch = d_get_subint_default(&(prms->tr)
        , "additional_params", "samples_per_batch", ctx.samples->sample_count * 0.05);

    disable_optimizations = prms->ext_vects == NULL;

    if (!disable_optimizations) {
        /* create pca projections for the samples */
        pca_projection_samples = matrix_dot(samples, prms->ext_vects);
        calculate_vector_list_lengths(pca_projection_samples, samples->sample_count, &vector_lengths_pca_samples);

        /* create pca projections for the clusters */
        pca_projection_clusters = sparse_vectors_matrix_dot(ctx.cluster_vectors,
                                                            ctx.no_clusters,
                                                            prms->ext_vects);
    }

    /* choose the random subset of samples for the first iteration */
    create_chosen_sample_map(&chosen_sample_map, ctx.samples->sample_count, samples_per_batch, &(prms->seed));

    for (i = 0; i < prms->iteration_limit && !ctx.converged && !prms->stop; i++) {
        /* track how many blockvector calculations were made / saved */
        uint64_t saved_calculations_pca;
        uint64_t done_pca_calcs, saved_calculations_cauchy;

        /* reset all calculation counters */
        done_pca_calcs = 0;
        saved_calculations_cauchy = 0;
        saved_calculations_pca = 0;

        /* initialize data needed for the iteration */
        pre_process_iteration(&ctx);

        if (!disable_optimizations) {
            /* cluster centers moved last iteration: refresh the lengths of
             * their pca projections (free(NULL) is a no-op on entry) */
            free(vector_lengths_pca_clusters);
            calculate_vector_list_lengths(pca_projection_clusters, ctx.no_clusters, &vector_lengths_pca_clusters);
        }

        /* the counters are shared across threads; sum them with a
         * reduction instead of racing on the shared locals */
        #pragma omp parallel for schedule(dynamic, 1000) \
            reduction(+: done_pca_calcs, saved_calculations_pca, saved_calculations_cauchy)
        for (j = 0; j < ctx.samples->sample_count; j++) {
            /* iterate over all samples of the current minibatch */
            VALUE_TYPE dist;
            uint64_t cluster_id, sample_id;

            if (!prms->stop && chosen_sample_map[j]) {
                sample_id = j;
                if (omp_get_thread_num() == 0) check_signals(&(prms->stop));

                for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
                    /* iterate over all cluster centers */

                    if (!disable_optimizations) {
                        /* bv_minibatch_kmeans */

                        /* we already know the distance to the cluster from last iteration */
                        if (cluster_id == ctx.previous_cluster_assignments[sample_id]) continue;

                        /* evaluate cauchy approximation. fast but not good */
                        dist = lower_bound_euclid(ctx.vector_lengths_clusters[cluster_id]
                                                  , ctx.vector_lengths_samples[sample_id]);

                        if (dist >= ctx.cluster_distances[sample_id]) {
                            /* approximated distance is larger than current best distance. skip full distance calculation */
                            saved_calculations_cauchy += 1;
                            goto end;
                        }

                        /* tighter bound: distance in the pca projection */
                        dist = euclid_vector(pca_projection_samples[sample_id].keys
                                             , pca_projection_samples[sample_id].values
                                             , pca_projection_samples[sample_id].nnz
                                             , pca_projection_clusters[cluster_id].keys
                                             , pca_projection_clusters[cluster_id].values
                                             , pca_projection_clusters[cluster_id].nnz
                                             , vector_lengths_pca_samples[sample_id]
                                             , vector_lengths_pca_clusters[cluster_id]);

                        done_pca_calcs += 1;
                        if (dist >= ctx.cluster_distances[sample_id] && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
                            /* approximated distance is larger than current best distance. skip full distance calculation */
                            saved_calculations_pca += 1;
                            goto end;
                        }
                    }

                    /* if we reached this point we need to calculate a full euclidean distance */
                    dist = euclid_vector_list(ctx.samples, sample_id, ctx.cluster_vectors, cluster_id
                                              , ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
                    #pragma omp atomic
                    ctx.done_calculations += 1;

                    if (dist < ctx.cluster_distances[sample_id]) {
                        /* replace current best distance with new distance;
                         * each j is owned by one thread, so this is race-free */
                        ctx.cluster_distances[sample_id] = dist;
                        ctx.cluster_assignments[sample_id] = cluster_id;
                    }

                    end:;
                }
            }
        }

        check_signals(&(prms->stop));

        post_process_iteration_minibatch(&ctx
                                         , chosen_sample_map
                                         , max_not_improved_counter
                                         , &conv_ctx);

        /* shift clusters to new position */
        calculate_shifted_clusters_minibatch_kmeans(&ctx, chosen_sample_map);
        switch_to_shifted_clusters(&ctx);

        /* draw the minibatch for the next iteration */
        create_chosen_sample_map(&chosen_sample_map, ctx.samples->sample_count, samples_per_batch, &(prms->seed));

        if (!disable_optimizations) {
            /* update only projections for cluster that shifted */
            update_dot_products(ctx.cluster_vectors,
                                ctx.no_clusters,
                                prms->ext_vects,
                                ctx.clusters_not_changed,
                                pca_projection_clusters);

            d_add_ilist(&(prms->tr), "iteration_pca_calcs", done_pca_calcs);
            d_add_ilist(&(prms->tr), "iteration_pca_calcs_success", saved_calculations_pca + saved_calculations_cauchy);
        }

        #pragma omp parallel for
        for (j = 0; j < ctx.samples->sample_count; j++) {
            /* iterate over all chosen samples for the next iteration and
             * update their distance to their current cluster
             */
            if (chosen_sample_map[j]) {
                ctx.cluster_distances[j]
                    = euclid_vector_list(ctx.samples, j
                                         , ctx.cluster_vectors, ctx.cluster_assignments[j]
                                         , ctx.vector_lengths_samples
                                         , ctx.vector_lengths_clusters);
                #pragma omp atomic
                ctx.done_calculations += 1;
                #pragma omp atomic
                ctx.total_no_calcs += 1;
            }
        }

        print_iteration_summary(&ctx, prms, i);

        if (!disable_optimizations) {
            /* print projection statistics */
            if (prms->verbose) LOG_INFO("PCA statistics b:%" PRINTF_INT64_MODIFIER "u/db:%" PRINTF_INT64_MODIFIER "u"
                                        , saved_calculations_pca
                                        , done_pca_calcs);
        }
    }

    if (prms->verbose) LOG_INFO("total total_no_calcs = %" PRINTF_INT64_MODIFIER "u", ctx.total_no_calcs);

    res = create_kmeans_result(prms, &ctx);

    /* cleanup all */
    if (!disable_optimizations) {
        free_vector_list(pca_projection_samples, samples->sample_count);
        free(vector_lengths_pca_samples);
        free(pca_projection_samples);
        free_vector_list(pca_projection_clusters, ctx.no_clusters);
        free(pca_projection_clusters);
        free(vector_lengths_pca_clusters);
    }

    free_null(chosen_sample_map);
    free_general_context(&ctx, prms);
    return res;
}
|
smg2_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "smg.h"
/*--------------------------------------------------------------------------
 * Sets up new coarse grid operator structure.
 *--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG2CreateRAPOp
 *
 * Creates (but does not compute) the 2D coarse-grid operator RAP.
 * A 5- or 9-point fine-grid operator always yields a 9-point coarse
 * operator; when A is stored symmetrically only the lower triangle plus
 * diagonal (5 entries) is allocated.  R and PT are accepted for interface
 * uniformity; only A's symmetry flag and communicator are consulted here.
 * Returns the newly created (unassembled) RAP matrix; caller owns it.
 *--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_SMG2CreateRAPOp( hypre_StructMatrix *R,
                       hypre_StructMatrix *A,
                       hypre_StructMatrix *PT,
                       hypre_StructGrid   *coarse_grid )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   /* one ghost layer in x and y, none in z; presumably ordered
      {x-lo, x-hi, y-lo, y-hi, z-lo, z-hi} — TODO confirm against
      hypre_StructMatrixSetNumGhost */
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 0, 0};
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/
   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/
   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 9;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 9 elements (c,w,e,n,s,sw,se,nw,ne)
             *--------------------------------------------------------------*/
            hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,0);
            stencil_rank++;
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/
   else
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP.
       * Only store the lower triangular part + diagonal = 5 entries;
       * "lower triangular" means the lower triangular part of the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Store 5 elements in (c,w,s,sw,se)
             *--------------------------------------------------------------*/
            if( i+j <=0 )
            {
               hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,0);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);
   /* the matrix keeps its own reference; drop ours */
   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points.  In the symmetric case only the lower
    * triangle is stored, so the upper-side ghost entries (1 and 3) are
    * not needed.
    *-----------------------------------------------------------------------*/
   if (hypre_StructMatrixSymmetric(A))
   {
      RAP_num_ghost[1] = 0;
      RAP_num_ghost[3] = 0;
   }
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 5 or 9-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
* I've written two routines - hypre_SMG2BuildRAPSym to build the
* lower triangular part of RAP (including the diagonal) and
* hypre_SMG2BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the
* first routine would be called. With full storage both would need to
* be called.
*
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG2BuildRAPSym
 *
 * Computes the lower triangular part (including the diagonal) of the
 * 9-point 2D coarse-grid operator RAP = R*A*PT.  Handles 5- and 9-point
 * fine-grid operators A; assumes c-to-c interpolation is the identity
 * (see the file-level comment block above).  cindex/cstride map coarse
 * box indices onto the fine grid.  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG2BuildRAPSym( hypre_StructMatrix *A,
                       hypre_StructMatrix *PT,
                       hypre_StructMatrix *R,
                       hypre_StructMatrix *RAP,
                       hypre_Index         cindex,
                       hypre_Index         cstride )
{
   hypre_Index           index;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *PT_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   /* interpolation / restriction weight arrays */
   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   /* fine-grid operator coefficient arrays */
   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real           *a_csw, *a_cse, *a_cnw;

   /* coarse-grid operator coefficient arrays (lower triangle + diagonal) */
   HYPRE_Real           *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real           *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi to the fine box whose id matches coarse box ci */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
      R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pa is pointer for weight for f-point above c-point
       * pb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

      hypre_SetIndex3(index,0,-1,0);
      pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for restriction operator:
       * ra is pointer for weight for f-point above c-point
       * rb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      hypre_SetIndex3(index,0,-1,0);
      rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cs is pointer for south coefficient
       * a_cn is pointer for north coefficient
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,0,0);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,-1,0,0);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,1,0,0);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,0,-1,0);
      a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,0,1,0);
      a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract additional pointers for 9-point fine grid operator:
       *
       * a_csw is pointer for southwest coefficient
       * a_cse is pointer for southeast coefficient
       * a_cnw is pointer for northwest coefficient
       * (a_cne is not needed for the lower triangular part)
       *-----------------------------------------------------------------*/
      if(fine_stencil_size > 5)
      {
         hypre_SetIndex3(index,-1,-1,0);
         a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index,1,-1,0);
         a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index,-1,1,0);
         a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
      }

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator - always 9-point:
       *
       * We build only the lower triangular part (plus diagonal).
       *
       * rap_cc is pointer for center coefficient (etc.)
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,0,0);
      rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,-1,0,0);
      rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,0,-1,0);
      rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,-1,-1,0);
      rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,1,-1,0);
      rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets are used in referring to data associated with other points.
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
      yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
      hypre_SetIndex3(index,1,0,0);
      xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);

      /*-----------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 9-point.
       *-----------------------------------------------------------------*/
      switch (fine_stencil_size)
      {
         /*--------------------------------------------------------------
          * Loop for symmetric 5-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/
         case 5:
         hypre_BoxGetSize(cgrid_box, loop_size);
         hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                             PT_dbox, cstart, stridec, iP,
                             R_dbox, cstart, stridec, iR,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop4For(iP, iR, iA, iAc)
         {
            /* fine-grid neighbors one row below/above the current point */
            iAm1 = iA - yOffsetA;
            iAp1 = iA + yOffsetA;

            iP1 = iP - yOffsetP - xOffsetP;
            rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

            iP1 = iP - yOffsetP;
            rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
               + rb[iR] * a_cs[iAm1]
               + a_cs[iA] * pa[iP1];

            iP1 = iP - yOffsetP + xOffsetP;
            rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

            iP1 = iP - xOffsetP;
            rap_cw[iAc] = a_cw[iA]
               + rb[iR] * a_cw[iAm1] * pb[iP1]
               + ra[iR] * a_cw[iAp1] * pa[iP1];

            rap_cc[iAc] = a_cc[iA]
               + rb[iR] * a_cc[iAm1] * pb[iP]
               + ra[iR] * a_cc[iAp1] * pa[iP]
               + rb[iR] * a_cn[iAm1]
               + ra[iR] * a_cs[iAp1]
               + a_cs[iA] * pb[iP]
               + a_cn[iA] * pa[iP];
         }
         hypre_BoxLoop4End(iP, iR, iA, iAc);
         break;

         /*--------------------------------------------------------------
          * Loop for symmetric 9-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/
         default:
         hypre_BoxGetSize(cgrid_box, loop_size);
         hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                             PT_dbox, cstart, stridec, iP,
                             R_dbox, cstart, stridec, iR,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop4For(iP, iR, iA, iAc)
         {
            iAm1 = iA - yOffsetA;
            iAp1 = iA + yOffsetA;

            iP1 = iP - yOffsetP - xOffsetP;
            rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
               + rb[iR] * a_csw[iAm1]
               + a_csw[iA] * pa[iP1];

            iP1 = iP - yOffsetP;
            rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
               + rb[iR] * a_cs[iAm1]
               + a_cs[iA] * pa[iP1];

            iP1 = iP - yOffsetP + xOffsetP;
            rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
               + rb[iR] * a_cse[iAm1]
               + a_cse[iA] * pa[iP1];

            iP1 = iP - xOffsetP;
            rap_cw[iAc] = a_cw[iA]
               + rb[iR] * a_cw[iAm1] * pb[iP1]
               + ra[iR] * a_cw[iAp1] * pa[iP1]
               + rb[iR] * a_cnw[iAm1]
               + ra[iR] * a_csw[iAp1]
               + a_csw[iA] * pb[iP1]
               + a_cnw[iA] * pa[iP1];

            rap_cc[iAc] = a_cc[iA]
               + rb[iR] * a_cc[iAm1] * pb[iP]
               + ra[iR] * a_cc[iAp1] * pa[iP]
               + rb[iR] * a_cn[iAm1]
               + ra[iR] * a_cs[iAp1]
               + a_cs[iA] * pb[iP]
               + a_cn[iA] * pa[iP];
         }
         hypre_BoxLoop4End(iP, iR, iA, iAc);
         break;

      } /* end switch statement */

   } /* end ForBoxI */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG2BuildRAPNoSym
 *
 * Computes the upper triangular part (excluding the diagonal) of the
 * 9-point 2D coarse-grid operator RAP = R*A*PT.  Used only when the fine
 * operator is stored non-symmetrically; hypre_SMG2BuildRAPSym builds the
 * lower triangle and diagonal.  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG2BuildRAPNoSym( hypre_StructMatrix *A,
                         hypre_StructMatrix *PT,
                         hypre_StructMatrix *R,
                         hypre_StructMatrix *RAP,
                         hypre_Index         cindex,
                         hypre_Index         cstride )
{
   hypre_Index           index;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *PT_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   /* interpolation / restriction weight arrays */
   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   /* fine-grid operator coefficient arrays */
   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cn;
   HYPRE_Real           *a_cse, *a_cnw, *a_cne;

   /* coarse-grid operator coefficient arrays (upper triangle only) */
   HYPRE_Real           *rap_ce, *rap_cn;
   HYPRE_Real           *rap_cnw, *rap_cne;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi to the fine box whose id matches coarse box ci */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
      R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pa is pointer for weight for f-point above c-point
       * pb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

      hypre_SetIndex3(index,0,-1,0);
      pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for restriction operator:
       * ra is pointer for weight for f-point above c-point
       * rb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      hypre_SetIndex3(index,0,-1,0);
      rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cn is pointer for north coefficient
       * (the south coefficient is not needed for the upper triangle)
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,0,0);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,-1,0,0);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,1,0,0);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index,0,1,0);
      a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract additional pointers for 9-point fine grid operator:
       *
       * a_cse is pointer for southeast coefficient
       * a_cnw is pointer for northwest coefficient
       * a_cne is pointer for northeast coefficient
       * (a_csw is not needed for the upper triangular part)
       *-----------------------------------------------------------------*/
      if(fine_stencil_size > 5)
      {
         hypre_SetIndex3(index,1,-1,0);
         a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index,-1,1,0);
         a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index,1,1,0);
         a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
      }

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator - always 9-point:
       *
       * We build only the upper triangular part.
       *
       * rap_ce is pointer for east coefficient (etc.)
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,1,0,0);
      rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,0,1,0);
      rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,1,1,0);
      rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index,-1,1,0);
      rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets are used in referring to data associated with other points.
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index,0,1,0);
      yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
      yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
      hypre_SetIndex3(index,1,0,0);
      xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);

      /*-----------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 9-point.
       *-----------------------------------------------------------------*/
      switch (fine_stencil_size)
      {
         /*--------------------------------------------------------------
          * Loop for 5-point fine grid operator; produces upper triangular
          * part of 9-point coarse grid operator - excludes diagonal.
          * stencil entries: (northeast, north, northwest, and east)
          *--------------------------------------------------------------*/
         case 5:
         hypre_BoxGetSize(cgrid_box, loop_size);
         hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                             PT_dbox, cstart, stridec, iP,
                             R_dbox, cstart, stridec, iR,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop4For(iP, iR, iA, iAc)
         {
            /* fine-grid neighbors one row below/above the current point */
            iAm1 = iA - yOffsetA;
            iAp1 = iA + yOffsetA;

            iP1 = iP + yOffsetP + xOffsetP;
            rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];

            iP1 = iP + yOffsetP;
            rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
               + ra[iR] * a_cn[iAp1]
               + a_cn[iA] * pb[iP1];

            iP1 = iP + yOffsetP - xOffsetP;
            rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];

            iP1 = iP + xOffsetP;
            rap_ce[iAc] = a_ce[iA]
               + rb[iR] * a_ce[iAm1] * pb[iP1]
               + ra[iR] * a_ce[iAp1] * pa[iP1];
         }
         hypre_BoxLoop4End(iP, iR, iA, iAc);
         break;

         /*--------------------------------------------------------------
          * Loop for 9-point fine grid operator; produces upper triangular
          * part of 9-point coarse grid operator - excludes diagonal.
          * stencil entries: (northeast, north, northwest, and east)
          *--------------------------------------------------------------*/
         default:
         hypre_BoxGetSize(cgrid_box, loop_size);
         hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                             PT_dbox, cstart, stridec, iP,
                             R_dbox, cstart, stridec, iR,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop4For(iP, iR, iA, iAc)
         {
            iAm1 = iA - yOffsetA;
            iAp1 = iA + yOffsetA;

            iP1 = iP + yOffsetP + xOffsetP;
            rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
               + ra[iR] * a_cne[iAp1]
               + a_cne[iA] * pb[iP1];

            iP1 = iP + yOffsetP;
            rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
               + ra[iR] * a_cn[iAp1]
               + a_cn[iA] * pb[iP1];

            iP1 = iP + yOffsetP - xOffsetP;
            rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
               + ra[iR] * a_cnw[iAp1]
               + a_cnw[iA] * pb[iP1];

            iP1 = iP + xOffsetP;
            rap_ce[iAc] = a_ce[iA]
               + rb[iR] * a_ce[iAm1] * pb[iP1]
               + ra[iR] * a_ce[iAp1] * pa[iP1]
               + rb[iR] * a_cne[iAm1]
               + ra[iR] * a_cse[iAp1]
               + a_cse[iA] * pb[iP1]
               + a_cne[iA] * pa[iP1];
         }
         hypre_BoxLoop4End(iP, iR, iA, iAc);
         break;

      } /* end switch statement */

   } /* end ForBoxI */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Collapses stencil in periodic direction on coarsest grid.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG2RAPPeriodicSym
 *
 * Collapses the symmetric (lower-triangle) coarse operator in the periodic
 * y-direction on the coarsest grid: when the coarse grid has period 1 in y,
 * the south-row entries are folded into the west/center entries and then
 * zeroed.  No-op unless the y-period is exactly 1.  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG2RAPPeriodicSym( hypre_StructMatrix *RAP,
                          hypre_Index         cindex,
                          hypre_Index         cstride )
{
   hypre_Index             index;

   hypre_StructGrid       *cgrid;
   hypre_BoxArray         *cgrid_boxes;
   hypre_Box              *cgrid_box;
   hypre_IndexRef          cstart;
   hypre_Index             stridec;
   hypre_Index             loop_size;

   HYPRE_Int               ci;

   hypre_Box              *RAP_dbox;

   HYPRE_Real             *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real             *rap_csw, *rap_cse;

   HYPRE_Int               iAc;
   HYPRE_Int               iAcm1;

   HYPRE_Int               xOffset;

   HYPRE_Real              zero = 0.0;

   hypre_SetIndex3(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   if (hypre_IndexY(hypre_StructGridPeriodic(cgrid)) == 1)
   {
      /* assemble first so ghost values read below (e.g. rap_cse[iAcm1])
         are up to date */
      hypre_StructMatrixAssemble(RAP);

      hypre_ForBoxI(ci, cgrid_boxes)
      {
         cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

         cstart = hypre_BoxIMin(cgrid_box);

         RAP_dbox =
            hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

         hypre_SetIndex3(index,1,0,0);
         xOffset = hypre_BoxOffsetDistance(RAP_dbox,index);

         /*-----------------------------------------------------------------
          * Extract pointers for coarse grid operator - always 9-point:
          *-----------------------------------------------------------------*/
         hypre_SetIndex3(index,0,0,0);
         rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,-1,0,0);
         rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,0,-1,0);
         rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,-1,-1,0);
         rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,1,-1,0);
         rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_BoxGetSize(cgrid_box, loop_size);

         /* fold the south row into west/center; by symmetry the center
            picks up 2*south */
         hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc,iAcm1) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(iAc)
         {
            iAcm1 = iAc - xOffset;

            rap_cw[iAc] += (rap_cse[iAcm1] + rap_csw[iAc]);
            rap_cc[iAc] += (2.0 * rap_cs[iAc]);
         }
         hypre_BoxLoop1End(iAc);

         /* zero the collapsed entries in a second pass so the first pass
            never reads an already-zeroed neighbor */
         hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(iAc)
         {
            rap_csw[iAc] = zero;
            rap_cs[iAc] = zero;
            rap_cse[iAc] = zero;
         }
         hypre_BoxLoop1End(iAc);

      } /* end ForBoxI */

   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Collapses stencil in periodic direction on coarsest grid.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG2RAPPeriodicNoSym
 *
 * Collapses the full (non-symmetric) 9-point coarse operator in the
 * periodic y-direction on the coarsest grid: when the coarse grid has
 * period 1 in y, the north and south rows are folded into the center row
 * (column by column) and then zeroed.  No-op unless the y-period is
 * exactly 1.  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG2RAPPeriodicNoSym( hypre_StructMatrix *RAP,
                            hypre_Index         cindex,
                            hypre_Index         cstride )
{
   hypre_Index             index;

   hypre_StructGrid       *cgrid;
   hypre_BoxArray         *cgrid_boxes;
   hypre_Box              *cgrid_box;
   hypre_IndexRef          cstart;
   hypre_Index             stridec;
   hypre_Index             loop_size;

   HYPRE_Int               ci;

   hypre_Box              *RAP_dbox;

   HYPRE_Real             *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real             *rap_csw, *rap_cse;
   HYPRE_Real             *rap_ce, *rap_cn;
   HYPRE_Real             *rap_cnw, *rap_cne;

   HYPRE_Int               iAc;

   HYPRE_Real              zero = 0.0;

   hypre_SetIndex3(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   if (hypre_IndexY(hypre_StructGridPeriodic(cgrid)) == 1)
   {
      hypre_ForBoxI(ci, cgrid_boxes)
      {
         cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

         cstart = hypre_BoxIMin(cgrid_box);

         RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

         /*-----------------------------------------------------------------
          * Extract pointers for coarse grid operator - always 9-point:
          *-----------------------------------------------------------------*/
         hypre_SetIndex3(index,0,0,0);
         rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,-1,0,0);
         rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,0,-1,0);
         rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,-1,-1,0);
         rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,1,-1,0);
         rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,1,0,0);
         rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,0,1,0);
         rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,1,1,0);
         rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex3(index,-1,1,0);
         rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_BoxGetSize(cgrid_box, loop_size);

         /* single pass: each point only touches its own entries, so the
            fold and the zeroing can be done together */
         hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(iAc)
         {
            rap_cw[iAc] += (rap_cnw[iAc] + rap_csw[iAc]);
            rap_cnw[iAc] = zero;
            rap_csw[iAc] = zero;

            rap_cc[iAc] += (rap_cn[iAc] + rap_cs[iAc]);
            rap_cn[iAc] = zero;
            rap_cs[iAc] = zero;

            rap_ce[iAc] += (rap_cne[iAc] + rap_cse[iAc]);
            rap_cne[iAc] = zero;
            rap_cse[iAc] = zero;
         }
         hypre_BoxLoop1End(iAc);

      } /* end ForBoxI */

   }

   return hypre_error_flag;
}
|
flush-1.c | /* { dg-additional-options "-fdump-tree-gimple" } */
/* { dg-final { scan-tree-dump "foo \\(4\\);\[\n\r]* __atomic_thread_fence \\(4\\);\[\n\r]* foo \\(4\\);" "gimple" } } */
/* { dg-final { scan-tree-dump "foo \\(3\\);\[\n\r]* __atomic_thread_fence \\(3\\);\[\n\r]* foo \\(3\\);" "gimple" } } */
/* { dg-final { scan-tree-dump "foo \\(2\\);\[\n\r]* __atomic_thread_fence \\(2\\);\[\n\r]* foo \\(2\\);" "gimple" } } */
/* { dg-final { scan-tree-dump "foo \\(5\\);\[\n\r]* __sync_synchronize \\(\\);\[\n\r]* foo \\(5\\);" "gimple" } } */
void foo (int);
/* acq_rel flush: per the scan-tree-dump above, must gimplify to
   __atomic_thread_fence (4) between the two foo (4) calls.  */
void
f1 (void)
{
  foo (4);
  #pragma omp flush acq_rel
  foo (4);
}
/* release flush: per the scan-tree-dump above, must gimplify to
   __atomic_thread_fence (3) between the two foo (3) calls.  */
void
f2 (void)
{
  foo (3);
  #pragma omp flush release
  foo (3);
}
/* acquire flush: per the scan-tree-dump above, must gimplify to
   __atomic_thread_fence (2) between the two foo (2) calls.  */
void
f3 (void)
{
  foo (2);
  #pragma omp flush acquire
  foo (2);
}
/* plain flush (no memory-order clause): per the scan-tree-dump above,
   must gimplify to __sync_synchronize () between the two foo (5) calls.  */
void
f4 (void)
{
  foo (5);
  #pragma omp flush
  foo (5);
}
|
oddRow.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
/*
 * Fills an N x N matrix with random values in [1,10], prints it, then uses
 * one OpenMP thread per odd-numbered row (1, 3, 5, 7 for N=9) to sum that
 * row in parallel, storing the sums in R and printing them with the
 * elapsed wall time.
 *
 * Fixes over the previous version:
 *  - sumRow was a shared accumulator that was never reset, so all threads
 *    raced on it and every R[id] received a garbage mixture of row sums;
 *    each thread now uses its own private accumulator.
 *  - t1 was assigned inside the parallel region by every thread (a data
 *    race); it is now taken once, after the region completes.
 *  - malloc results are checked, and both buffers are freed.
 *  - threads with no corresponding odd row are guarded against writing
 *    past the end of R.
 */
int main()
{
    int *A, *R, i, j, p = 4, N = 9, rows = (N / 2);
    double t0, t1, t_tot;

    A = malloc((size_t)(N * N) * sizeof *A);
    R = malloc((size_t)rows * sizeof *R);
    if (A == NULL || R == NULL)
    {
        fprintf(stderr, "allocation failure\n");
        free(A);
        free(R);
        return 1;
    }

    srand(time(NULL));
    printf("The matrix is: \n");
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
        {
            A[(i * N) + j] = (rand() % 10) + 1;
            printf("[%d]\t", A[(i * N) + j]);
        }
        printf("\n");
    }

    t0 = omp_get_wtime();
    #pragma omp parallel shared(A, R, N, rows) num_threads(p)
    {
        int id = omp_get_thread_num();
        int row = (id * 2) + 1;          /* this thread's odd row */
        if (id < rows)                   /* guard: no write past end of R */
        {
            int sum = 0;                 /* private accumulator: no race */
            int k;
            for (k = 0; k < N; k++)
            {
                sum += A[row * N + k];
            }
            R[id] = sum;
        }
    }
    t1 = omp_get_wtime();                /* taken once, after all threads join */

    printf("\nThe vector is: \n");
    for (i = 0; i < rows; i++)
    {
        printf("[%d]\t", R[i]);
    }

    t_tot = t1 - t0;
    printf("\nTotal time: %f", t_tot);

    free(A);
    free(R);
    return 0;
}
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Private 128-bit register representation backing simde__m128.  Every
   member aliases the same 16 bytes, so a value written through one view
   can be read through another.  Which members exist is chosen by the
   feature macros: compiler vector extensions, plain arrays, and the
   native type for whichever SIMD ISA was detected at compile time. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  /* GCC/Clang vector-extension views (subscriptable, element-wise ops) */
  SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  #if defined(SIMDE_HAVE_INT128_)
  SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  #endif
  SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
  /* portable fallback: plain fixed-size arrays */
  SIMDE_ALIGN_TO_16 int8_t i8[16];
  SIMDE_ALIGN_TO_16 int16_t i16[8];
  SIMDE_ALIGN_TO_16 int32_t i32[4];
  SIMDE_ALIGN_TO_16 int64_t i64[2];
  SIMDE_ALIGN_TO_16 uint8_t u8[16];
  SIMDE_ALIGN_TO_16 uint16_t u16[8];
  SIMDE_ALIGN_TO_16 uint32_t u32[4];
  SIMDE_ALIGN_TO_16 uint64_t u64[2];
  #if defined(SIMDE_HAVE_INT128_)
  SIMDE_ALIGN_TO_16 simde_int128 i128[1];
  SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
  #endif
  SIMDE_ALIGN_TO_16 simde_float32 f32[4];
  SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
  SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif

  /* two 64-bit MMX halves, for SSE<->MMX conversion helpers */
  SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
  SIMDE_ALIGN_TO_16 simde__m64 m64[2];

#if defined(SIMDE_X86_SSE_NATIVE)
  /* native view for the detected SIMD ISA, when one is available */
  SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
  SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
  SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
  SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
  SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
  SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
  SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
  SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
  SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
  #endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
  #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
  #endif
#endif
} simde__m128_private;
/* Public simde__m128 type.  Prefer the platform's native 128-bit float
 * vector type so values can live in SIMD registers; fall back to a
 * GNU-style generic vector, and finally to the private union itself. */
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
/* When native aliases are requested, expose the type under its MSVC/ICC
 * name as well. */
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
/* Both representations must be exactly 16 bytes (and 16-byte aligned)
 * so they can be converted back and forth with a plain memcpy. */
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
  /* Reinterpret the private union as the public type.  memcpy (rather
   * than a pointer cast) avoids strict-aliasing violations; the sizes
   * match (static-asserted above), so this compiles to a register move. */
  simde__m128 result;
  simde_memcpy(&result, &v, sizeof result);
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
  /* Inverse of simde__m128_from_private: copy the public value into the
   * private union so individual lanes can be inspected or modified. */
  simde__m128_private result;
  simde_memcpy(&result, &v, sizeof result);
  return result;
}
/* Generate simde__m128_to_<isa>_<type>() / simde__m128_from_<isa>_<type>()
 * helper pairs for each native vector type the current target provides. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
/* float64x2_t only exists on AArch64. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
/* Work around GCC bug 95782 by open-coding the float32 conversion pair
 * instead of using the generator macro. */
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
/* 64-bit element vectors require POWER7 (or zArch vector facility). */
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
/* Rounding-mode constants for the MXCSR emulation.  The fallback values
 * match the native _MM_ROUND_* bit patterns (MXCSR bits 13-14). */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
/* _mm_round_* immediate flags (SSE4.1 "FROUND" encoding).  Reuse the
 * native macros when the intrinsics header supplies them. */
#if defined(_MM_FROUND_TO_NEAREST_INT)
#  define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
#  define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
#  define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
#  define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
#  define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
#  define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
#  define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
#  define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
#  define SIMDE_MM_FROUND_TO_NEG_INF 0x01
#  define SIMDE_MM_FROUND_TO_POS_INF 0x02
#  define SIMDE_MM_FROUND_TO_ZERO 0x03
#  define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
#  define SIMDE_MM_FROUND_RAISE_EXC 0x00
#  define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
/* Convenience combinations, mirroring <smmintrin.h>. */
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
#  define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
#  define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
#  define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
#  define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
#  define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
#  define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
#  define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
#  define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
#  define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
#  define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
#  define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
#  define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
/* MXCSR exception status flags (bits 0-5).  Use the native definitions
 * when <xmmintrin.h> provides them so the bit positions line up. */
#if defined(_MM_EXCEPT_INVALID)
#  define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
#  define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
#  define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
#  define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
#  define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
#  define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
#  define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
#  define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
#  define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
#  define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
#  define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
#  define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
#  define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
#  define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
/* MXCSR exception mask flags (bits 7-12). */
#if defined(_MM_MASK_INVALID)
#  define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
#  define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
#  define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
#  define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
#  define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
#  define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
#  define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
#  define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
#  define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
#  define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
#  define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
#  define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
#  define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
#  define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
/* MXCSR flush-to-zero control (bit 15).  The MASK and ON values are the
 * same single bit; OFF is simply that bit cleared. */
#if defined(_MM_FLUSH_ZERO_MASK)
#  define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
#  define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
#  define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
#  define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
#  define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
#  define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
/* Returns the current rounding mode as a SIMDE_MM_ROUND_* value.
 * Without native SSE we translate the C <fenv.h> rounding mode; without
 * <fenv.h> support we can only report the default (round-to-nearest).
 *
 * Bug fix: the FE_TOWARDZERO and FE_DOWNWARD cases were swapped
 * (FE_TOWARDZERO reported ROUND_DOWN and FE_DOWNWARD reported
 * ROUND_TOWARD_ZERO), which disagreed with the inverse mapping in
 * SIMDE_MM_SET_ROUNDING_MODE below and broke get/set round-tripping. */
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
/* Unknown fenv mode: report the architectural default. */
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
/* Sets the rounding mode from a SIMDE_MM_ROUND_* value.  Without native
 * SSE the request is translated to the C <fenv.h> rounding mode; an
 * unrecognized value is silently ignored (early return). */
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
/* Returns the current flush-to-zero mode (masked out of MXCSR on native
 * SSE).  Without native SSE there is no flush-to-zero control to query,
 * so always report "off". */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
/* Bug fix: this alias previously defined _MM_SET_FLUSH_ZERO_MODE (a
 * copy-paste of the SET alias below), so _MM_GET_FLUSH_ZERO_MODE was
 * never defined and _MM_SET_FLUSH_ZERO_MODE was defined twice. */
#define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif
/* Sets the flush-to-zero mode.  Without native SSE this is a no-op,
 * since no emulated path implements flush-to-zero. */
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
/* Emulates _mm_getcsr.  Without native SSE only the rounding-mode bits
 * are available; the other MXCSR fields read back as zero. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
/* Emulates _mm_setcsr.  Without native SSE only the rounding-mode bits
 * of the requested control word take effect. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
/* Rounds each f32 lane of `a` according to the SIMDE_MM_FROUND_*
 * `rounding` flags.  Backs simde_mm_round_ps (SSE4.1) on platforms
 * without the native intrinsic.  `lax_rounding` is currently unused in
 * this implementation. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
/* Resolve CUR_DIRECTION to a concrete mode up front (MXCSR rounding
 * bits live at bit 13, hence the << 13). */
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
/* NOTE(review): the unreachable fallbacks below call
 * simde_mm_undefined_pd() (a __m128d helper) from a __m128 function —
 * looks like a copy-paste from the sse2 version; confirm intended. */
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
/* Emulates _mm_set_ps: builds a vector from four scalars.  Note the
 * intrinsic's argument order is high-to-low, so e0 lands in lane 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_f32 loads lanes in ascending memory order, hence e0 first. */
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
/* Emulates _mm_set_ps1 / _mm_set1_ps: broadcasts `a` to all four lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
/* The (void) cast is redundant (a is used below); kept byte-identical. */
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_set_ps1(a) simde_mm_set_ps1(a)
#  define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
/* Emulates _mm_move_ss: result lane 0 comes from b, lanes 1-3 from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
/* Index 4 selects b's lane 0; 1-3 select a's upper lanes. */
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Select b where the mask is all-ones (element 0 only), else a. */
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_broadcastlow_ps(simde__m128 a) {
/* This function broadcasts the first element in the input vector to
* all lanes. It is used to avoid generating spurious exceptions in
* *_ss functions since there may be garbage in the upper lanes. */
#if defined(SIMDE_X86_SSE_NATIVE)
/* Shuffle immediate 0 selects lane 0 for every output lane. */
return _mm_shuffle_ps(a, a, 0);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[0];
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* Emulates _mm_add_ps: lane-wise f32 addition. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
/* Emulates _mm_add_ss: lane 0 = a[0] + b[0]; lanes 1-3 copied from a.
 * The broadcastlow variant avoids spurious FP exceptions from garbage
 * in the upper lanes when SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Add a vector that is zero except for b[0] in lane 0, so the upper
 * lanes of a pass through unchanged. */
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
/* Emulates _mm_and_ps: bitwise AND of the 128-bit values (operates on
 * the integer view of the lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
/* Emulates _mm_andnot_ps: computes (~a) & b.  Note the operand order:
 * it is the FIRST argument that gets complemented, matching the
 * intrinsic (NEON vbicq/AltiVec vec_andc take operands reversed). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
/* Emulates _mm_xor_ps: bitwise XOR of the 128-bit values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
/* Emulates _mm_or_ps: bitwise OR of the 128-bit values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
/* SIMDe-internal helper: bitwise NOT of all 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* Ternary-logic truth table 0x55 is the complement of the first operand. */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* SIMDe-internal helper: bitwise select — result = mask ? b : a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* a ^ ((a ^ b) & mask) == (mask ? b : a) for all-ones/all-zeros masks. */
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* Emulates _mm_avg_pu16 (MMX pavgw): per-lane rounded unsigned average,
 * (a + b + 1) >> 1, computed without overflow via a widened type. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen u16 -> u32 so the +1 rounding add cannot overflow. */
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
#  define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
/* Emulates _mm_avg_pu8 (MMX pavgb): per-lane rounded unsigned average,
 * (a + b + 1) >> 1, computed without overflow via a widened type. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen u8 -> u16 so the +1 rounding add cannot overflow. */
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
#  define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
/* SIMDe-internal helper: lane-wise absolute value of f32 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
/* Clear each lane's sign bit by ANDing with 0x7FFFFFFF; the mask is
 * type-punned through memcpy to avoid strict-aliasing issues. */
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* Emulates _mm_cmpeq_ps: each lane becomes all-ones when a[i] == b[i],
 * all-zeros otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* GNU vector comparison already yields 0 / -1 per lane. */
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
/* simde_mm_cmpge_ps (CMPGEPS): lane-wise a >= b; all-ones mask per true
 * lane, all-zeros per false lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
/* simde_mm_cmpge_ss: a >= b mask in lane 0; upper lanes copied from a.
 * The native path is skipped under PGI (__PGI), presumably a compiler
 * workaround -- TODO confirm. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
/* simde_mm_cmpgt_ps (CMPGTPS): lane-wise a > b mask (all-ones / all-zeros
 * per 32-bit lane). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
/* simde_mm_cmpgt_ss: a > b mask in lane 0; lanes 1-3 copied from a.
 * Native intrinsic avoided under __PGI (compiler workaround -- TODO
 * confirm). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
/* simde_mm_cmple_ps (CMPLEPS): lane-wise a <= b mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
/* simde_mm_cmple_ss: a <= b mask in lane 0; upper lanes copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
/* simde_mm_cmplt_ps (CMPLTPS): lane-wise a < b mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
/* simde_mm_cmplt_ss: a < b mask in lane 0; upper lanes copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
/* simde_mm_cmpneq_ps (CMPNEQPS): lane-wise a != b mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no "not equal"; invert the equality mask. */
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
/* cmpeq followed by NOR-with-self = bitwise complement of the mask. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
/* simde_mm_cmpneq_ss: a != b mask in lane 0; upper lanes copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
  /* NOT(a >= b) expressed as (a < b).  NOTE(review): the two differ only
   * for NaN inputs (cmpnge yields all-ones, cmplt all-zeros); this follows
   * the same convention as the other cmpn* wrappers here. */
  simde__m128 not_ge_mask = simde_mm_cmplt_ps(a, b);
  return not_ge_mask;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
  /* Scalar NOT(a >= b): delegate to the lane-0 less-than comparison
   * (NaN lanes differ from strict x86 semantics, as with cmpnge_ps). */
  simde__m128 result = simde_mm_cmplt_ss(a, b);
  return result;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
  /* NOT(a > b) expressed as (a <= b); equivalent except for NaN lanes,
   * matching the convention of the surrounding cmpn* wrappers. */
  simde__m128 not_gt_mask = simde_mm_cmple_ps(a, b);
  return not_gt_mask;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
  /* Scalar NOT(a > b): delegate to the lane-0 <= comparison. */
  simde__m128 result = simde_mm_cmple_ss(a, b);
  return result;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
  /* NOT(a <= b) expressed as (a > b); equivalent except for NaN lanes. */
  simde__m128 not_le_mask = simde_mm_cmpgt_ps(a, b);
  return not_le_mask;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
  /* Scalar NOT(a <= b): delegate to the lane-0 > comparison. */
  simde__m128 result = simde_mm_cmpgt_ss(a, b);
  return result;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
  /* NOT(a < b) expressed as (a >= b); equivalent except for NaN lanes. */
  simde__m128 not_lt_mask = simde_mm_cmpge_ps(a, b);
  return not_lt_mask;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
  /* Scalar NOT(a < b): delegate to the lane-0 >= comparison. */
  simde__m128 result = simde_mm_cmpge_ss(a, b);
  return result;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
/* simde_mm_cmpord_ps (CMPORDPS): per-lane "ordered" test -- all-ones where
 * neither a nor b is NaN, all-zeros where either is NaN.  Implemented
 * everywhere as (a == a) AND (b == b), since x == x is false iff x is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON does not have ordered compare builtin
Need to compare a eq a and b eq b to check for NaN
Do AND of results to get final */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
/* No NaN test available on this platform at all. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
/* simde_mm_cmpunord_ps (CMPUNORDPS): per-lane "unordered" test -- all-ones
 * where either a or b is NaN.  Logical complement of cmpord_ps, built from
 * self-inequality (x != x is true iff x is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NOT((a == a) AND (b == b)) == (a is NaN) OR (b is NaN). */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Pre-P8 AltiVec has no NAND; AND then complement via NOR-with-self. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
/* simde_mm_cmpunord_ss: unordered test in lane 0 (all-ones when either
 * a[0] or b[0] is NaN); lanes 1-3 copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
/* simde_mm_comieq_ss (COMISS/sete): scalar lane-0 equality returning an
 * int 0/1.  NOTE(review): the NEON path ORs in the "either is NaN" mask
 * (so NaN inputs yield 1, matching COMISS flag behavior for eq/le/lt),
 * while the scalar fallback's == yields 0 for NaN -- the two paths differ
 * for NaN inputs; confirm against upstream intent. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* x == x is false iff x is NaN; invert AND of self-tests to get the
 * "unordered" mask, then OR with the equality mask. */
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
/* simde_mm_comige_ss: scalar lane-0 a >= b as int 0/1; the NEON path ANDs
 * with the not-NaN masks, so NaN inputs yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
/* simde_mm_comigt_ss: scalar lane-0 a > b as int 0/1; NaN inputs yield 0
 * on the NEON path. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
/* simde_mm_comile_ss: scalar lane-0 a <= b as int 0/1; the NEON path ORs
 * in the unordered mask, so NaN inputs yield 1. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
/* simde_mm_comilt_ss: scalar lane-0 a < b as int 0/1; NaN inputs yield 1
 * on the NEON path (unordered mask is ORed in). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
/* simde_mm_comineq_ss: scalar lane-0 a != b as int 0/1; NaN inputs yield 0
 * on the NEON path (result is ANDed with the not-NaN masks). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
/* simde_x_mm_copysign_ps: SIMDe-internal helper; each lane of the result
 * takes dest's magnitude and src's sign bit (lane-wise copysignf). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* -0.0f has only the sign bit set; use it to bit-select sign from src. */
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
/* XL C's vec_cpsgn takes its operands in the opposite order. */
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
/* dest XOR ((dest XOR src) AND signmask) replaces only the sign bit. */
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
/* simde_x_mm_xorsign_ps: flips dest's sign in each lane where src is
 * negative, i.e. dest XOR (sign bit of src). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
/* simde_mm_cvt_pi2ps (CVTPI2PS): converts the two int32 lanes of b to
 * floats in result lanes 0-1; lanes 2-3 are copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
/* simde_mm_cvt_ps2pi (CVTPS2PI): converts the two low float lanes of a to
 * int32 using the current rounding mode (via simde_mm_round_ps /
 * nearbyintf on the fallback path). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
/* simde_mm_cvt_si2ss (CVTSI2SS): converts int32 b to float in lane 0;
 * lanes 1-3 are copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
/* simde_mm_cvt_ss2si (CVTSS2SI): converts lane 0 of a to int32 using the
 * current rounding mode; out-of-range inputs produce INT32_MIN (x86's
 * "integer indefinite") unless SIMDE_FAST_CONVERSION_RANGE is set. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
/* Strict > / < bounds avoid UB converting values outside int32 range. */
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
/* simde_mm_cvtpi16_ps (CVTPI16toPS): converts four signed int16 lanes to
 * four floats (exact -- every int16 is representable as float). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Sign-extend i16 -> i32 then convert to f32. */
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
/* simde_mm_cvtpi32_ps (CVTPI2PS): same operation as simde_mm_cvt_pi2ps --
 * b's two int32 lanes become floats in lanes 0-1, a's lanes 2-3 pass
 * through. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
/* simde_mm_cvtpi32x2_ps: converts two __m64 int32 pairs into one __m128 of
 * four floats -- a fills lanes 0-1, b fills lanes 2-3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
/* simde_mm_cvtpi8_ps (CVTPI8toPS): converts the four low signed int8 lanes
 * of a to four floats; the upper four i8 lanes are ignored. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Sign-extend i8 -> i16 -> i32, then convert to f32. */
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
/* simde_mm_cvtps_pi16 (CVTPS2PI16): rounds each of the four float lanes
 * and narrows to int16 (fallback rounds with roundf, i.e. half away from
 * zero; the NEON path uses vrndiq_f32, the current rounding mode --
 * NOTE(review): the two paths can differ on .5 ties; confirm intent). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
/* simde_mm_cvtps_pi32 (CVTPS2PI): rounds the two low float lanes to int32;
 * out-of-range values become INT32_MIN unless SIMDE_FAST_CONVERSION_RANGE
 * is defined. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
/* simde_mm_cvtps_pi8 (CVTPS2PI8): rounds and saturates the four float
 * lanes to int8 in the low half of the result; the upper four i8 lanes
 * are left undefined, matching the x86 instruction. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
 * i16, combine with an all-zero vector of i16 (which will become the upper
 * half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
/* Saturate before converting; a bare cast of an out-of-range float
 * would be undefined behavior. */
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
/* Convert four unsigned 16-bit integers to four single-precision floats
 * (portable _mm_cvtpu16_ps).  Exact: every uint16_t fits in a float. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widen u16 -> u32, then convert to f32. */
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
/* Convert the four low unsigned 8-bit integers to four single-precision
 * floats (portable _mm_cvtpu8_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widen u8 -> u16, keep the low four lanes, widen to u32, convert to f32. */
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
/* Replace the lowest float lane of a with (float) b; upper three lanes are
 * copied from a (portable _mm_cvtsi32_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
/* Replace the lowest float lane of a with (float) b for a 64-bit b
 * (portable _mm_cvtsi64_ss; only native on AMD64).  PGI spells the
 * intrinsic _mm_cvtsi64x_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
/* Extract the lowest single-precision lane of a (portable _mm_cvtss_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
/* _mm_cvtss_si32 is the same operation as _mm_cvt_ss2si (newer mnemonic);
 * delegate to the existing implementation. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
/* Convert the lowest float lane of a to a rounded 64-bit signed integer
 * (portable _mm_cvtss_si64; only native on AMD64).
 * NOTE(review): unlike the 32-bit variants, no out-of-range clamping is
 * performed here — conversion of an out-of-range value is UB; confirm
 * whether that is intentional upstream. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
/* Convert the two low float lanes to two 32-bit ints with TRUNCATION
 * (portable _mm_cvtt_ps2pi / _mm_cvttps_pi32).  Out-of-range inputs yield
 * INT32_MIN unless SIMDE_FAST_CONVERSION_RANGE is defined. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
/* vcvt truncates toward zero, matching CVTTPS2PI. */
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
/* Convert the lowest float lane to int32_t with TRUNCATION (portable
 * _mm_cvtt_ss2si / _mm_cvttss_si32), range-checked unless
 * SIMDE_FAST_CONVERSION_RANGE. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
/* Out-of-range -> INT32_MIN, mirroring x86's "integer indefinite". */
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
/* Convert the lowest float lane to int64_t with TRUNCATION (portable
 * _mm_cvttss_si64; native only on AMD64, and avoided on MSVC).
 * NOTE(review): no range check here, unlike the 32-bit variant — an
 * out-of-range input is UB; confirm intent. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
/* Scalar "ordered" compare (portable _mm_cmpord_ss): lane 0 becomes all-ones
 * if neither a[0] nor b[0] is NaN, all-zeros otherwise; lanes 1-3 are copied
 * from a.  The broadcastlow variant avoids spurious FP exceptions from the
 * unused upper lanes when SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
/* No isnanf available: this configuration is unsupported. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
/* Lane-wise single-precision division (portable _mm_div_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 has no vector divide: use a reciprocal estimate plus one
 * Newton-Raphson refinement step, then multiply.  Lower precision than a
 * true divide. */
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
/* Scalar single-precision division: lane 0 = a[0]/b[0], lanes 1-3 copied
 * from a (portable _mm_div_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the unused lanes cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_div_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
/* Extract 16-bit lane imm8 (0-3) from a (portable _mm_extract_pi16 /
 * PEXTRW).  When a native or NEON path exists the function is shadowed by
 * a macro below so imm8 stays a compile-time constant. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
/* Insert 16-bit value i into lane imm8 (0-3) of a (portable
 * _mm_insert_pi16 / PINSRW).  The function below is the generic fallback;
 * when a native or NEON path exists it is shadowed by a macro so imm8
 * remains a compile-time constant for the intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if defined(SIMDE_BUG_CLANG_44589)
/* Fixed: this macro previously defined "ssimde_mm_insert_pi16" (typo), so
 * the clang-44589 workaround never shadowed the function and a stray
 * identifier leaked into the namespace. */
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
/* Load four floats from a 16-byte-aligned address (portable _mm_load_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
/* Tell the compiler mem_addr carries __m128 alignment so the memcpy can
 * be lowered to an aligned vector load. */
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
/* Load one float and broadcast it to all four lanes (portable
 * _mm_load1_ps / _mm_load_ps1). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
/* Load one float into lane 0 and zero the upper three lanes (portable
 * _mm_load_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
/* Load two floats from mem_addr into the HIGH half of the result; the low
 * half is copied from a (portable _mm_loadh_pi). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
/* Load two floats from mem_addr into the LOW half of the result; the high
 * half is copied from a (portable _mm_loadl_pi).  Uses memcpy so mem_addr
 * need not be 8-byte aligned (see the comment above about __m64). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* Load four floats from an aligned address in REVERSED lane order
 * (portable _mm_loadr_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Reverse within each 64-bit half, then swap the halves. */
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
/* Load four floats from an UNALIGNED address (portable _mm_loadu_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
/* memcpy is the portable unaligned load; compilers lower it to a plain
 * vector load where legal. */
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
/* Conditionally store bytes of a to mem_addr: byte i is written only when
 * the sign bit of mask byte i is set (portable _mm_maskmove_si64 /
 * MASKMOVQ).  No store is performed for clear mask bytes. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
/* Lane-wise signed 16-bit maximum (portable _mm_max_pi16 / PMAXSW). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
/* Lane-wise single-precision maximum (portable _mm_max_ps).  Matches x86
 * NaN semantics — if either operand is NaN, the SECOND operand (b) is
 * returned — via the select-on-greater-than forms; the SIMDE_FAST_NANS
 * variants may differ on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
/* Lane-wise unsigned 8-bit maximum (portable _mm_max_pu8 / PMAXUB). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
/* Scalar single-precision maximum: lane 0 = max(a[0], b[0]), lanes 1-3
 * copied from a (portable _mm_max_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the unused lanes cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_max_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Fixed: was "maxq_f32", which is not a NEON intrinsic and failed to
 * compile on the ARMv7 path; the intrinsic is vmaxq_f32. */
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
/* Lane-wise signed 16-bit minimum (portable _mm_min_pi16 / PMINSW). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
/* Lane-wise single-precision minimum (portable _mm_min_ps).  The non-
 * FAST_NANS paths use select-on-less-than so that, as on x86, the SECOND
 * operand is returned when either input is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
/* b > a selects a, otherwise (including NaN) b — matches MINPS. */
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Generic vector path: mask-select via cmplt/and/andnot. */
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
/* Lane-wise unsigned 8-bit minimum (portable _mm_min_pu8 / PMINUB). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
/* Scalar single-precision minimum: lane 0 = min(a[0], b[0]), lanes 1-3
 * copied from a (portable _mm_min_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the unused lanes cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_min_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
/* MOVHLPS: result = { b[2], b[3], a[2], a[3] } (portable _mm_movehl_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* Merge the high 64-bit halves (as i64 lanes), b's half in the low slot. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
/* MOVLHPS: result = { a[0], a[1], b[0], b[1] } (portable _mm_movelh_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* Merge the low 64-bit halves (as i64 lanes), a's half in the low slot. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
/* Collect the sign bit of each of the eight bytes of a into the low 8 bits
 * of the result (portable _mm_movemask_pi8 / PMOVMSKB). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Isolate each sign bit (0x80), shift byte i's bit into position i (the
 * negative shift counts are right shifts), then horizontally add. */
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
/* Collect the sign bit of each of the four float lanes into the low 4 bits
 * of the result (portable _mm_movemask_ps / MOVMSKPS). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Shift each lane's sign bit down to bit 0, shift lane i's bit to
 * position i, then horizontally add. */
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
/* simde_mm_mul_ps: portable _mm_mul_ps.
 * Lane-wise single-precision multiply: r[i] = a[i] * b[i] for i in 0..3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* GCC/Clang vector extensions: element-wise operator. */
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
/* simde_mm_mul_ss: portable _mm_mul_ss.
 * r[0] = a[0] * b[0]; lanes 1..3 are passed through from `a`.
 * The broadcastlow variant avoids spurious FP exceptions from the
 * unused upper lanes when SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
/* simde_mm_mulhi_pu16: portable _mm_mulhi_pu16 (pmulhuw).
 * For each 16-bit unsigned lane, returns the high 16 bits of the
 * 32-bit product a[i] * b[i]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply, shift the high halves down, then narrow back to u16. */
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
/* When native aliases are enabled, replace the platform's _MM_HINT_*
 * prefetch-hint macros with the SIMDE_MM_HINT_* equivalents defined above
 * (NTA/T0/T1/T2 plus the exclusive-hint ENTA/ET0/ET1/ET2 variants).
 * Each name is #undef'd first because <xmmintrin.h> may already define it.
 * Fixed here: the exclusive non-temporal hint is ENTA (see
 * SIMDE_MM_HINT_ENTA above), not "ETNA", and the undef before the ET2
 * definition must target _MM_HINT_ET2 (it previously repeated ET1). */
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
/* simde_mm_prefetch: portable _mm_prefetch.
 * Prefetch is only a hint, so the hint argument `i` is ignored here and
 * the whole call degrades to a no-op on compilers without
 * __builtin_prefetch. (A macro below overrides this with the real
 * intrinsic on native SSE builds.) */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
/* simde_x_mm_negate_ps: SIMDe-internal helper (no x86 equivalent).
 * Negates all four single-precision lanes of `a` by flipping the sign bit.
 * Note: vec_neg is only available from GCC 8.1 on POWER, hence the version
 * check on the AltiVec branch; the previous duplicate, unguarded
 * SIMDE_POWER_ALTIVEC_P8_NATIVE branch has been removed because it was
 * selected exactly on the GCC < 8.1 builds that lack vec_neg (those builds
 * now fall through to SIMDE_VECTOR_NEGATE or the scalar loop). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* simde_mm_rcp_ps: portable _mm_rcp_ps.
 * Approximate lane-wise reciprocal (1/x). Like the hardware instruction,
 * the result is an approximation; accuracy of the fallbacks is tuned via
 * SIMDE_ACCURACY_PREFERENCE (extra Newton-Raphson steps on NEON). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Hardware estimate refined by Newton-Raphson iterations. */
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
/* Bit-trick initial estimate, then one Newton-Raphson refinement. */
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
/* simde_mm_rcp_ss: portable _mm_rcp_ss.
 * Approximate reciprocal of lane 0 only; lanes 1..3 pass through from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the upper lanes cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_rcp_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
/* simde_mm_rsqrt_ps: portable _mm_rsqrt_ps.
 * Approximate lane-wise reciprocal square root (1/sqrt(x)). The IEEE-754
 * fallback uses the classic "fast inverse square root" bit trick with
 * magic constants and Newton-Raphson steps selected by
 * SIMDE_ACCURACY_PREFERENCE. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
/* Raw bit-trick estimate, no refinement. */
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
/* Magic constant differs with the number of refinement steps planned. */
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
/* One Newton-Raphson step always; a second when accuracy >= 2. */
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
/* simde_mm_rsqrt_ss: portable _mm_rsqrt_ss.
 * Approximate reciprocal square root of lane 0; lanes 1..3 pass through
 * from `a`. Uses the same bit-trick/Newton-Raphson scheme as
 * simde_mm_rsqrt_ps, applied to lane 0 only.
 * Fixed here: the NEON branch must convert the simde__m128 returned by
 * simde_mm_rsqrt_ps through simde__m128_to_private before reading
 * .neon_f32 (the same pattern simde_mm_sqrt_ss uses); accessing the
 * member directly on the public type does not compile on NEON builds. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
/* simde_mm_sad_pu8: portable _mm_sad_pu8 (psadbw).
 * Sum of absolute differences of the eight unsigned bytes of `a` and `b`;
 * the 16-bit sum is placed in lane 0, remaining lanes are zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* |a-b| per byte, pairwise-widen to u16, then sum the four partials. */
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
/* abs() comes from <stdlib.h>; u8 operands promote to int, so the
 * difference and abs stay in range. */
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
/* simde_mm_set_ss: portable _mm_set_ss.
 * Returns a vector with `a` in lane 0 and zeros in lanes 1..3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
/* simde_mm_setr_ps: portable _mm_setr_ps.
 * Sets the four lanes in "reverse" (memory) order: lane 0 = e3 ... lane 3 = e0,
 * i.e. simde_mm_set_ps with the arguments flipped. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
/* simde_mm_setzero_ps: portable _mm_setzero_ps.
 * Returns a vector of four +0.0f lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
/* All-zero bytes are a valid +0.0f representation under IEEE 754. */
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
/* simde_mm_undefined_ps: portable _mm_undefined_ps.
 * Returns a vector whose contents are unspecified. Without compiler
 * support (__builtin_ia32_undef128) it zero-fills instead, unless
 * uninitialized-variable diagnostics are being suppressed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
/* simde_x_mm_setone_ps: SIMDe-internal helper (no x86 equivalent).
 * Returns a vector with every bit set, via x == x on a zero vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
/* simde_mm_sfence: portable _mm_sfence.
 * Store fence. Without the native intrinsic this issues the strongest
 * available full memory barrier (seq-cst), falling back through GCC
 * builtins, C11 atomics, MSVC MemoryBarrier, __sync, and finally an
 * empty OpenMP critical section. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
/* simde_mm_shuffle_pi16: portable _mm_shuffle_pi16 (pshufw).
 * Each 16-bit lane i of the result selects lane ((imm8 >> (2*i)) & 3) of `a`.
 * Implemented as the native intrinsic, a compiler shuffle-vector macro,
 * or the plain function below. */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
/* Suppress a false-positive "conditionally uninitialized" warning on r_. */
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
/* simde_mm_shuffle_ps: portable _mm_shuffle_ps.
 * Result lanes 0-1 come from `a`, lanes 2-3 from `b`; each source lane
 * index is a 2-bit field of imm8 (low to high). */
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_ps(a, b, imm8) \
__extension__({ \
float32x4_t ret; \
ret = vmovq_n_f32( \
vgetq_lane_f32(a, (imm8) & (0x3))); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
ret, 1); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
ret, 2); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
ret, 3); \
})
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
/* simde_mm_sqrt_ps: portable _mm_sqrt_ps.
 * Lane-wise single-precision square root. On A32 NEON (no vsqrtq_f32)
 * it refines a reciprocal-sqrt estimate with SIMDE_ACCURACY_PREFERENCE+1
 * Newton-Raphson steps, then multiplies by `a` (sqrt(x) = x * 1/sqrt(x)).
 * Fixed here: the scalar-fallback guard tested defined(simde_math_sqrt)
 * but the branch calls simde_math_sqrtf; the guard now tests the symbol
 * that is actually used, so builds providing only the float variant do
 * not fall into HEDLEY_UNREACHABLE(). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
/* simde_mm_sqrt_ss: portable _mm_sqrt_ss.
 * Square root of lane 0 only; lanes 1..3 pass through from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the upper lanes cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_sqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
/* simde_mm_store_ps: portable _mm_store_ps.
 * Stores all four lanes of `a` to mem_addr. Like the native intrinsic,
 * mem_addr is expected to be 16-byte aligned. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_store1_ps: portable _mm_store1_ps / _mm_store_ps1.
 * Broadcasts lane 0 of `a` to all four elements of mem_addr (16-byte
 * aligned, asserted via SIMDE_ALIGN_ASSUME_LIKE).
 * Fixed here: the SIMDE_SHUFFLE_VECTOR_ branch passed the raw vector
 * member tmp_.f32 to simde_mm_store_ps, which takes a simde__m128; it
 * must go through simde__m128_from_private (same pattern as
 * simde_mm_storer_ps). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_store_ss: portable _mm_store_ss.
 * Stores only lane 0 of `a` to *mem_addr; no alignment requirement. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_storeh_pi: portable _mm_storeh_pi.
 * Stores the upper two single-precision lanes of `a` (lanes 2-3) to the
 * 64-bit location mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
/* simde_mm_storel_pi: portable _mm_storel_pi.
 * Stores the lower two single-precision lanes of `a` (lanes 0-1) to the
 * 64-bit location mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
/* simde_mm_storer_ps: portable _mm_storer_ps.
 * Stores the four lanes of `a` to mem_addr in reversed order
 * (mem_addr[0] = a[3] ... mem_addr[3] = a[0]); mem_addr must be
 * 16-byte aligned. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Reverse within each 64-bit half, then rotate halves to finish the
 * full 4-lane reversal. */
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_storeu_ps: portable _mm_storeu_ps.
 * Unaligned store of all four lanes of `a` to mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_sub_ps: portable _mm_sub_ps.
 * Lane-wise single-precision subtraction: r[i] = a[i] - b[i]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
/* simde_mm_sub_ss: portable _mm_sub_ss.
 * r[0] = a[0] - b[0]; lanes 1..3 pass through from `a`. The broadcastlow
 * variant prevents FP exceptions from the unused upper lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
/* simde_mm_ucomieq_ss: portable _mm_ucomieq_ss.
 * Unordered compare of lane 0: returns 1 if a[0] == b[0] OR either is
 * NaN (unordered compares return true on NaN for eq/le/lt). The fenv
 * dance suppresses spurious FP exceptions from the scalar comparison. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* x == x is false only for NaN; OR the NaN mask with the eq result. */
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
/* simde_mm_ucomige_ss: portable _mm_ucomige_ss.
 * Unordered compare of lane 0: returns 1 if a[0] >= b[0] and neither
 * operand is NaN (ge/gt/neq return false on NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* AND with the "both not NaN" mask so NaN inputs yield 0. */
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
/* simde_mm_ucomigt_ss: portable _mm_ucomigt_ss.
 * Unordered compare of lane 0: returns 1 if a[0] > b[0] and neither
 * operand is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
/* simde_mm_ucomile_ss: portable _mm_ucomile_ss.
 * Unordered compare of lane 0: returns 1 if a[0] <= b[0] OR either
 * operand is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
/* simde_mm_ucomilt_ss: portable _mm_ucomilt_ss.
 * Unordered compare of lane 0: returns 1 if a[0] < b[0] OR either
 * operand is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
/* simde_mm_ucomineq_ss: portable _mm_ucomineq_ss.
 * Unordered compare of lane 0: returns 1 if a[0] != b[0] and neither
 * operand is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
/* Portable _mm_unpackhi_ps: interleave the upper halves of a and b.
 * Result lanes: { a[2], b[2], a[3], b[3] }. */
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpackhi_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  /* AArch64: single zip-high instruction. */
  r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  /* 32-bit NEON: zip the two upper 64-bit halves, then recombine. */
  float32x2_t a1 = vget_high_f32(a_.neon_f32);
  float32x2_t b1 = vget_high_f32(b_.neon_f32);
  float32x2x2_t result = vzip_f32(a1, b1);
  r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  /* Compiler vector shuffle: pick source lanes 2,6,3,7 (b lanes are 4..7). */
  r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
  /* Scalar fallback. */
  r_.f32[0] = a_.f32[2];
  r_.f32[1] = b_.f32[2];
  r_.f32[2] = a_.f32[3];
  r_.f32[3] = b_.f32[3];
#endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
/* Portable _mm_unpacklo_ps: interleave the lower halves of a and b.
 * Result lanes: { a[0], b[0], a[1], b[1] }. */
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  /* AArch64: single zip-low instruction. */
  r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  /* AltiVec merge-high corresponds to the low (first) elements here. */
  r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  /* Compiler vector shuffle: lanes 0,4,1,5 (b lanes are 4..7). */
  r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  /* 32-bit NEON: zip the two lower 64-bit halves, then recombine. */
  float32x2_t a1 = vget_low_f32(a_.neon_f32);
  float32x2_t b1 = vget_low_f32(b_.neon_f32);
  float32x2x2_t result = vzip_f32(a1, b1);
  r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
  /* Scalar fallback. */
  r_.f32[0] = a_.f32[0];
  r_.f32[1] = b_.f32[0];
  r_.f32[2] = a_.f32[1];
  r_.f32[3] = b_.f32[1];
#endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* Portable _mm_stream_pi: store the 64-bit value a to *mem_addr.  On real
 * SSE/MMX hardware the store is non-temporal (cache-bypassing); the
 * fallbacks perform an ordinary store, which is functionally equivalent
 * since the non-temporal property is only a caching hint. */
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  simde__m64_private*
    dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
    a_ = simde__m64_to_private(a);

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  /* Extract the single 64-bit lane and store it. */
  dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
  dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* Portable _mm_stream_ps: store the four float lanes of a to mem_addr,
 * non-temporally where the compiler supports it.  mem_addr is expected to
 * be suitably aligned (see SIMDE_ALIGN_CAST below); the plain-store
 * fallback produces the same memory contents. */
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  /* Clang's generic non-temporal store on the vector member. */
  simde__m128_private a_ = simde__m128_to_private(a);
  __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
  /* Ordinary aligned store; only the caching hint is lost. */
  simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* SIMDE_MM_TRANSPOSE4_PS(row0..row3): transpose, in place, the 4x4 float
 * matrix whose rows are the four __m128 arguments (the portable equivalent
 * of xmmintrin.h's _MM_TRANSPOSE4_PS macro).  Comments are kept outside the
 * macro bodies because line-continuation backslashes and // comments do not
 * mix safely. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* 32-bit NEON: vtrnq_f32 transposes 2x2 sub-blocks of each row pair, then
 * vcombine_f32 reassembles the transposed rows from low/high halves. */
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW01 = vtrnq_f32(row0, row1); \
    float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW23 = vtrnq_f32(row2, row3); \
    row0 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
                        vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
    row1 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
                        vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
    row2 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
                        vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
    row3 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
                        vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
  } while (0)
#else
/* Generic path: the classic unpacklo/unpackhi + movelh/movehl sequence,
 * built from the portable simde helpers defined earlier in this file. */
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    simde__m128 SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp0; \
    SIMDE_MM_TRANSPOSE4_PS_tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
    SIMDE_MM_TRANSPOSE4_PS_tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
    SIMDE_MM_TRANSPOSE4_PS_tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
    SIMDE_MM_TRANSPOSE4_PS_tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
    row0 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp0, SIMDE_MM_TRANSPOSE4_PS_tmp2); \
    row1 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp0); \
    row2 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp3); \
    row3 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp1); \
  } while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
ipw_dim_red_parallel.c | // ------------------------------------------------------------------------
//
// This file is part of SDRcausal.
//
// SDRcausal is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the
// Free Software Foundation, either version 3 of the License, or (at your
// option) any later version.
//
// SDRcausal is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with SDRcausal. If not, see <https://www.gnu.org/licenses/>.
//
// ------------------------------------------------------------------------
//
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "omp_test.h"
#include "nw_kernel_regress.h"
#include "matrix_utilities.h"
#include "propensity_score.h"
#include "ipw_dim_red.h"
// Estimating-equation column sums for inverse-probability weighting with
// sufficient dimension reduction.  For each observation i, the propensity
// score and its derivative are estimated at x_alpha[i], E(X_lower | X*alpha)
// is estimated by Nadaraya-Watson kernel regression, and the i:th row's
// contribution is accumulated into *col_sum.  Rows whose propensity estimate
// is NaN are skipped and counted in *n_pr_nan.
void
ipw_dim_red (int n,                       // number of observations
             int p,                       // total number of covariates
             int d,                       // reduced dimension (d < p)
             const double x[n*p],         // full covariate matrix, row-major
             const double x_alpha[n*d],   // reduced covariates X * alpha
             const int treated[n],        // treatment indicator (presumably 0/1 -- confirm)
             int kernel_spec,             // kernel selector for the estimators
             double h1,                   // bandwidth for the propensity score
             double h2,                   // bandwidth for the NW regression
             double gauss_cutoff,         // cutoff passed to nw_kernel_regress
             int n_threads,               // OpenMP thread count
             int *n_pr_nan,               // out: rows with NaN propensity
             double (*col_sum)[(p-d)*d])  // out: accumulated column sums
{
  // Creating lower n x (p - d) submatrix of x
  // NOTE(review): x_lower is a VLA of n*(p-d) doubles on the stack -- confirm
  // callers keep n small enough to avoid stack overflow.
  int p0 = p - d;
  double x_lower[n*p0];
  for (int i=0; i<n; i++)
    for (int j=0; j<p0; j++)
      x_lower[i*p0 + j] = x[i*p + (j+d)];

  int n_nan_tmp = 0;

  // Thread-shared accumulator, copied into *col_sum at the end.
  double col_sum_tmp[d*p0];
  for (int i=0; i<d*p0; i++)
    col_sum_tmp[i] = 0;

#if defined(_OPENMP)
#pragma omp parallel for schedule (static) num_threads (n_threads)
#endif
  for (int i=0; i<n; i++)
  {
    // Evaluation point: the i:th reduced covariate vector.
    double x0[d];
    for (int j=0; j<d; j++)
      x0[j] = x_alpha[i*d + j];

    // NOTE(review): &d_pr has type double(*)[d]; confirm propensity_score's
    // prototype expects a pointer-to-array (same question for &e_hat below).
    double pr, d_pr[d], d_eta_est[d];
    propensity_score ( n,
                       d,
                       x_alpha,
                       x0,
                       treated,
                       kernel_spec,
                       h1,
                       &pr,
                       &d_pr);

    // Checks if probability is outside of range. In that case the row is
    // ignored but the optimization will be punished in the parent script.
    if (isnan (pr))
    {
#if defined(_OPENMP)
#pragma omp atomic
#endif
      n_nan_tmp++;
      continue;
    }
    else
    {
      // Derivative of eta (the propensity transform): d_pr / (pr * (1-pr)).
      for (int j=0; j<d; j++)
        d_eta_est[j] = d_pr[j] / (pr * (1 - pr));
    }

    // Distances from every reduced covariate row to the evaluation point.
    double x_distance[n*d];
    for (int j=0; j<n; j++)
      for (int k=0; k<d; k++)
        x_distance[j*d + k] = x_alpha[j*d + k] - x0[k];

    // Estimates E(X_i | X * alpha)
    double e_hat[p0];
    nw_kernel_regress ( n,
                        p0,
                        x_distance,
                        x_lower,
                        kernel_spec,
                        h2,
                        gauss_cutoff,
                        &e_hat);

    // Calculating the n:th row of the estimating equation
    // NOTE(review): only d_eta_est[0] is used, matching the "1d
    // implementation" comment -- confirm intended behavior for d > 1.
    for (int j=0; j<p0; j++)
    {
      // 1d implementation
#if defined(_OPENMP)
#pragma omp atomic
#endif
      col_sum_tmp[j] += (x_lower[i*p0 + j] - e_hat[j])
        * ( (double) treated[i] - pr) * d_eta_est[0];
    }
  }

  *n_pr_nan = n_nan_tmp;
  memmove (col_sum, col_sum_tmp, d*p0*sizeof (double));
  return;
}
|
random_par.c |
//**********************************************************
// Parallel Pseudo random number generator:
//
// USAGE:
//
// The pseudo random sequence is seeded with a range
//
// void range(lower_limit, higher_limit)
//
// and then subsequent calls to the random number generator
// generates values in the sequence:
//
// double drandom()
//
// A leap frog method is used to assure non-overlapping
// sequences for each thread.
//
// Note: these functions are to be called from inside the
// the OpenMP parallel region that will use the sequence.
//
// BACKGROUND:
//
// We are using a modulus of 2^31-1 and a multiplier from
// the Hoaglin LCGs in the following article:
//
// http://random.mat.sbg.ac.at/~charly/server/node3.html#lcg
//
// we are using a zero addend just to make the leap frog
// algorithm easier to implement.
//
// HISTORY:
//
// 9/2008: Written by Tim Mattson by cutting and pasting
// from a generator written by Larry Meadows
//
//***********************************************************
#include <omp.h>
static unsigned long long MULTIPLIER = 764261123;
static unsigned long long PMOD = 2147483647;
static unsigned long long mult_n;
double random_low, random_hi;
#define MAX_THREADS 128
static unsigned long long pseed[MAX_THREADS][4]; //[4] to padd to cache line
//size to avoid false sharing
unsigned long long random_last = 0;
#pragma omp threadprivate(random_last)
double drandom()
{
unsigned long long random_next;
double ret_val;
//
// compute an integer random number from zero to mod
//
random_next = (unsigned long long)((mult_n * random_last)% PMOD);
random_last = random_next;
//
// shift into preset range
//
ret_val = ((double)random_next/(double)PMOD)*(random_hi-random_low)+random_low;
return ret_val;
}
//
// set the seed, the multiplier and the range
//
//
// set the seed, the multiplier and the range
//
// Must be called from inside the OpenMP parallel region by every thread:
// the single thread entering the single block computes the leapfrog
// multiplier MULTIPLIER^nthreads mod PMOD and one distinct starting seed
// per thread; the implicit barrier at the end of the single block then
// lets every thread safely read its own seed into threadprivate
// random_last.
//
void range(double low_in, double hi_in)
{
    int i, id, nthreads;
    unsigned long long iseed;
    id = omp_get_thread_num();

#pragma omp single
    {
        // Normalize so random_low <= random_hi regardless of argument order.
        if(low_in < hi_in)
        {
            random_low = low_in;
            random_hi = hi_in;
        }
        else
        {
            random_low = hi_in;
            random_hi = low_in;
        }

        //
        // The Leapfrog method ... adjust the multiplier so you stride through
        // the sequence by increments of "nthreads" and adjust seeds so each
        // thread starts with the right offset
        //
        nthreads = omp_get_num_threads();
        // NOTE(review): no guard against nthreads > MAX_THREADS (128); pseed
        // would be overrun -- confirm thread counts stay within the limit.
        iseed = PMOD/MULTIPLIER;     // just pick a reasonable seed
        pseed[0][0] = iseed;
        mult_n = MULTIPLIER;
        for (i = 1; i < nthreads; ++i)
        {
            // Seed for thread i is MULTIPLIER^i * seed0 mod PMOD, and the
            // stride multiplier accumulates to MULTIPLIER^nthreads mod PMOD.
            iseed = (unsigned long long)((MULTIPLIER * iseed) % PMOD);
            pseed[i][0] = iseed;
            mult_n = (mult_n * MULTIPLIER) % PMOD;
        }
    }
    // The implicit barrier at the end of the single block above guarantees
    // pseed is fully populated before any thread reads its entry.
    random_last = (unsigned long long) pseed[id][0];
}
|
conv_dw_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "conv_dw_kernel_arm.h"
#include "conv_dw_k5_k7_kernel_arm.h"
#include "conv_dw_dilation_kernel_arm.h"
#ifdef __aarch64__
void dw_k3s2p0(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s2p0p1(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s1p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
void dw_k3s2p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
/* aarch64 3x3 depth-wise convolution dispatcher: one hand-written assembly
 * kernel call per channel, parallelized over channels.  Kernel selection:
 *   stride 1            -> dw_k3s1p1_a72
 *   stride 2, pad_h0==0 -> dw_k3s2p0 (pad_h1==0) or dw_k3s2p0p1 (pad_h1!=0)
 *   stride 2, pad_h0!=0 -> dw_k3s2p1_a72
 * NOTE(review): any stride != 1 falls into the stride-2 branches -- confirm
 * callers never pass stride > 2. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
                       float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
                       int num_thread, int cpu_affinity)
{
    int channel_size = input_h * input_w;         /* per-channel input elems  */
    int channel_size_out = output_h * output_w;   /* per-channel output elems */
    int pad_h0 = pads[0];                         /* leading (top) pad        */
    int pad_h1 = pads[2];                         /* trailing (bottom) pad    */

    if (stride == 1)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;   /* one bias value per channel */
            /* 3x3 weights per channel, hence the stride of 9. */
            dw_k3s1p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
        }
    }
    else if (pad_h0 == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;
            /* Trailing pad decides between the p0 and p0p1 kernel variants. */
            if (pad_h1 == 0)
                dw_k3s2p0(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w, activation);
            else
                dw_k3s2p0p1(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w,
                            activation);
        }
    }
    else
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;
            dw_k3s2p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
        }
    }
}
#else
void dw_k3s2(float* input, float* kernel, float* output, int channel, int width, int height, float* bias, int pad0);
void dw_k3s2_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s2_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s1p1(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
/* 32-bit ARM 3x3 depth-wise convolution dispatcher.  Activation fusion is
 * encoded in the kernel choice: activation == 0 -> ReLU variant,
 * activation > 0 -> ReLU6 variant, activation < 0 -> no activation.
 * NOTE(review): strides other than 1 or 2 leave output_buf untouched --
 * confirm callers validate the stride beforehand. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
                       float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
                       int num_thread, int cpu_affinity)
{
    int pad_h0 = pads[0];   /* leading pad, forwarded to the stride-2 kernels */

    if (stride == 1)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int c = 0; c < channel_num; c++)
        {
            float* cur_input = input_buf + c * input_h * input_w;
            float* cur_output = output_buf + c * output_h * output_w;
            float* cur_weight = weight_buf + c * 9;   /* 3x3 weights per channel */
            float* cur_bias = bias ? bias + c : bias;
            if (activation >= 0)
            {
                if (activation == 0)
                    dw_k3s1p1_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
                else
                    dw_k3s1p1_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
            }
            else
            {
                dw_k3s1p1(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
            }
        }
    }
    else if (stride == 2)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int c = 0; c < channel_num; c++)
        {
            float* cur_input = input_buf + c * input_h * input_w;
            float* cur_output = output_buf + c * output_h * output_w;
            float* cur_weight = weight_buf + c * 9;
            float* cur_bias = bias ? bias + c : bias;
            if (activation >= 0)
            {
                if (activation == 0)
                    dw_k3s2_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
                else
                    dw_k3s2_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
            }
            else
            {
                dw_k3s2(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
            }
        }
    }
}
#endif
/* Depth-wise convolution entry point (fp32).  Dispatches to a specialized
 * ARM kernel based on kernel size (3x3 / 5x5 / 7x7), stride and dilation,
 * iterating over the batch dimension.  Returns 0 on success, -1 when
 * stride_h != stride_w (only square strides are supported).
 * NOTE(review): kernel sizes other than 3/5/7, and 5x5/7x7 with strides
 * other than 1 or 2, fall through without writing the output -- confirm
 * callers guard against these combinations. */
int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                struct ir_tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int pads[4];
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    pads[0] = param->pad_h0;
    pads[1] = param->pad_w0;
    pads[2] = param->pad_h1;
    pads[3] = param->pad_w1;

    /* Only square strides are handled by the specialized kernels. */
    if (stride_h != stride_w)
        return -1;

    int act_type = param->activation;

    /* NCHW layout: dims = { batch, channels, height, width }. */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;   /* channels per group */
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* kernel_buf = ( float* )filter_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor)
        biases_buf = ( float* )bias_tensor->data;

    for (int n = 0; n < batch; n++)    // batch size
    {
        float* cur_input = input_buf + n * input_size * group;
        float* cur_output = output_buf + n * output_size * group;

        /* NOTE(review): the dilated path requires BOTH dilations != 1 AND
         * dilation_h == pad_h0 -- confirm this matches the dilation
         * kernel's constraints. */
        if (dilation_h != 1 && dilation_w != 1 && dilation_h == pads[0])
        {
            conv_dw_dilation_run(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, pads[0], act_type,
                                 num_thread);
        }
        else if (kernel_h == 3 && kernel_w == 3)
        {
            DirectConv(cur_input, in_h, in_w, cur_output, out_h, out_w, kernel_buf, group, stride_h, biases_buf, pads,
                       act_type, num_thread, cpu_affinity);
        }
        else if (kernel_h == 5 && kernel_w == 5)
        {
            if (stride_h == 1)
                depthwise_conv_k5s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    pads[0], pads[1], act_type, num_thread);
            else if (stride_h == 2)
                depthwise_conv_k5s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
        else if (kernel_h == 7 && kernel_w == 7)
        {
            if (stride_h == 1)
                depthwise_conv_k7s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
            else if (stride_h == 2)
                depthwise_conv_k7s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
    }

    return 0;
}
|
For2_Paralelo.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
/* OpenMP worksharing demo: fill a[] in one parallel for, derive b[] in a
 * second, printing which thread runs each iteration.  Always returns 0. */
int main()
{
#ifdef _OPENMP
  /* Disable dynamic thread adjustment so exactly 4 threads are requested. */
  (void) omp_set_dynamic(FALSE);
  if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
  (void) omp_set_num_threads(4);
#endif

  int n = 9;
  int a[9], b[9];

  /* Fix: `i` was declared here and listed in shared().  The iteration
   * variable of a worksharing `for` loop is predetermined private, and
   * naming it in a shared clause is non-conforming (GCC rejects it with
   * "iteration variable 'i' should be private").  Declaring the loop
   * variable inside each for-statement is the clean, conforming form. */
#pragma omp parallel default(none) shared(n,a,b)
  {
#pragma omp single
    printf("Primer ciclo for: el numero de hilos es %d\n",
           omp_get_num_threads());

#pragma omp for
    for (int i=0; i<n; i++)
    {
      printf("El hilo %d ejecuta en el ciclo la iteracion %d\n",
             omp_get_thread_num(),i);
      a[i] = i;
    }

#pragma omp single
    printf("Segundo ciclo for: el numero de hilos %d\n",
           omp_get_num_threads());

#pragma omp for
    for (int i=0; i<n; i++)
    {
      printf("El hilo %d ejecuta en el ciclo la iteracion %d\n",
             omp_get_thread_num(),i);
      b[i] = 2 * a[i];
    }
  } /* end of the parallel region */

  return(0);
}
|
core_zherk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_herk
*
* Performs one of the Hermitian rank k operations
*
* \f[ C = \alpha A \times A^H + \beta C, \f]
* or
* \f[ C = \alpha A^H \times A + \beta C, \f]
*
* where alpha and beta are real scalars, C is an n-by-n Hermitian
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaConjTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaConjTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaConjTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
******************************************************************************/
/* Sequential Hermitian rank-k update: thin column-major shim over CBLAS.
 * Declared weak so an architecture-specific implementation can replace it
 * at link time. */
__attribute__((weak))
void plasma_core_zherk(plasma_enum_t uplo, plasma_enum_t trans,
                       int n, int k,
                       double alpha, const plasma_complex64_t *A, int lda,
                       double beta, plasma_complex64_t *C, int ldc)
{
    /* PLASMA's enum values map directly onto the CBLAS enums. */
    const CBLAS_UPLO cblas_uplo = (CBLAS_UPLO)uplo;
    const CBLAS_TRANSPOSE cblas_trans = (CBLAS_TRANSPOSE)trans;

    cblas_zherk(CblasColMajor, cblas_uplo, cblas_trans,
                n, k, alpha, A, lda, beta, C, ldc);
}
/******************************************************************************/
/* Task-based wrapper around plasma_core_zherk: registers the data
 * dependences (A read, C read/write) and defers the update to an OpenMP
 * task.  The body is skipped once the sequence has recorded an error. */
void plasma_core_omp_zherk(plasma_enum_t uplo, plasma_enum_t trans,
                           int n, int k,
                           double alpha, const plasma_complex64_t *A, int lda,
                           double beta, plasma_complex64_t *C, int ldc,
                           plasma_sequence_t *sequence, plasma_request_t *request)
{
    /* Columns of A actually referenced: k when A is n-by-k (NoTrans),
     * n when A is k-by-n (ConjTrans). */
    const int ak = (trans == PlasmaNoTrans) ? k : n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(inout:C[0:ldc*n])
    {
        if (sequence->status == PlasmaSuccess) {
            plasma_core_zherk(uplo, trans,
                              n, k,
                              alpha, A, lda,
                              beta, C, ldc);
        }
    }
}
|
bml_add_csr_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_csr.h"
#include "bml_allocate_csr.h"
#include "bml_types_csr.h"
#include "bml_setters_csr.h"
#include "bml_threshold_csr.h"
#include "bml_scale_csr.h"
#include "bml_introspection_csr.h"
#include "../bml_logger.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Matrix addition.
*
* \f$ A = \alpha A + \beta B \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param B Matrix B
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by B
* \param threshold Threshold for matrix addition
*/
void TYPED_FUNC(
    bml_add_csr) (
    bml_matrix_csr_t * A,
    bml_matrix_csr_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int N = A->N_;
    /* Hash-table capacity hint: the current bandwidth (max row NNZ) of A. */
    int tsize = bml_get_bandwidth_csr(A);
#pragma omp parallel default(none) \
    shared(N, tsize, A, B) \
    shared(alpha, beta, threshold)
    {
        /* create hash table */
        /* One table per thread, reused (reset) across the rows it owns;
         * it maps column index -> position within the current A row. */
        csr_row_index_hash_t *table = csr_noinit_table(tsize);
#pragma omp for
        for (int i = 0; i < N; i++)
        {
            /* Pass 1: scale row i of A by alpha in place and index its
             * column positions in the table. */
            int *acols = A->data_[i]->cols_;
            REAL_T *avals = (REAL_T *) A->data_[i]->vals_;
            const int annz = A->data_[i]->NNZ_;
            for (int pos = 0; pos < annz; pos++)
            {
                avals[pos] *= alpha;
                csr_table_insert(table, acols[pos]);
            }
            /* Pass 2: fold beta * (row i of B) into row i of A, inserting
             * entries for columns A does not yet have.
             * NOTE(review): csr_set_row_element_new may grow the row's
             * storage; avals is not re-read afterwards -- confirm the
             * values buffer is never reallocated mid-row. */
            int *bcols = B->data_[i]->cols_;
            REAL_T *bvals = (REAL_T *) B->data_[i]->vals_;
            const int bnnz = B->data_[i]->NNZ_;
            for (int pos = 0; pos < bnnz; pos++)
            {
                int *idx = (int *) csr_table_lookup(table, bcols[pos]);
                REAL_T val = beta * bvals[pos];
                if (idx)
                {
                    avals[*idx] += val;
                }
                else
                {
                    TYPED_FUNC(csr_set_row_element_new) (A->data_[i],
                                                         bcols[pos], &val);
                }
            }
            //reset table
            csr_reset_table(table);
        }
        // delete table
        csr_deallocate_table(table);
    }
    /* apply thresholding */
    TYPED_FUNC(bml_threshold_csr) (A, threshold);
}
/******** Not sure why this function is needed or why norms are being computed here -DOK******/
/** Matrix addition.
*
* \f$ A = \alpha A + \beta B \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param B Matrix B
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by B
* \param threshold Threshold for matrix addition
*/
/* Stub: the norm-returning add variant is not implemented for the CSR
 * format; it always raises LOG_ERROR.  The 0.0 return satisfies the
 * signature (presumably unreachable if LOG_ERROR aborts -- confirm
 * LOG_ERROR semantics). */
double TYPED_FUNC(
    bml_add_norm_csr) (
    bml_matrix_csr_t * A,
    bml_matrix_csr_t * B,
    double alpha,
    double beta,
    double threshold)
{
    LOG_ERROR("bml_add_norm_csr: Not implemented");
    return 0.;
}
/** Matrix addition.
*
* A = A + beta * I
*
* \ingroup add_group
*
* \param A Matrix A
* \param beta Scalar factor multiplied by I
* \param threshold Threshold for matrix addition
*/
/* A = A + beta * I for a CSR matrix, followed by thresholding.  For each
 * row the diagonal entry is located and incremented by beta, or inserted
 * when the row has no diagonal entry yet. */
void TYPED_FUNC(
    bml_add_identity_csr) (
    bml_matrix_csr_t * A,
    double beta,
    double threshold)
{
    int N = A->N_;

    /* Hoisted loop-invariant check: when beta contributes nothing, the
     * per-row diagonal scans are pure wasted work, so skip the whole
     * parallel loop.  The comparison is kept as the original inequality
     * pair (rather than beta != 0.0) so a NaN beta still means "no
     * update", preserving the original behavior exactly. */
    if (beta > (double) 0.0 || beta < (double) 0.0)
    {
#pragma omp parallel for \
    shared(N)
        for (int i = 0; i < N; i++)
        {
            int *acols = A->data_[i]->cols_;
            REAL_T *avals = (REAL_T *) A->data_[i]->vals_;
            const int annz = A->data_[i]->NNZ_;
            int diag = -1;

            /* Find the position of the diagonal entry, if present. */
            for (int pos = 0; pos < annz; pos++)
            {
                if (acols[pos] == i)
                {
                    diag = pos;
                    break;
                }
            }

            /* If the diagonal entry does not exist, insert it; else add. */
            REAL_T val = (REAL_T) beta;
            if (diag == -1)
            {
                TYPED_FUNC(csr_set_row_element_new) (A->data_[i], i, &val);
            }
            else
            {
                avals[diag] += val;
            }
        }
    }

    /* apply thresholding */
    TYPED_FUNC(bml_threshold_csr) (A, threshold);
}
/** Matrix addition.
*
* A = alpha * A + beta * I
*
* \ingroup add_group
*
* \param A Matrix A
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by I
* \param threshold Threshold for matrix addition
*/
void TYPED_FUNC(
    bml_scale_add_identity_csr) (
    bml_matrix_csr_t * A,
    double alpha,
    double beta,
    double threshold)
{
    // scale then update diagonal
    // A <- alpha * A first (in place), then A <- A + beta * I;
    // bml_add_identity_csr also applies the thresholding pass.
    TYPED_FUNC(bml_scale_inplace_csr) (&alpha, A);
    TYPED_FUNC(bml_add_identity_csr) (A, beta, threshold);
}
|
DRB028-privatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
tmp@65:5 vs. tmp@65:5
*/
#include <stdlib.h>
#include <stdio.h>
/* Initialize a[] with its indices in parallel, then update each element to
 * a[i] + i through a scratch variable that is explicitly privatized. */
int main(int argc, char* argv[])
{
  int tmp;
  int len = 100;
  int a[100];

  /* First sweep: a[i] = i. */
#pragma omp parallel for simd
  for (int i = 0; i < len; i++)
    a[i] = i;

  /* Second sweep: tmp is private per thread/lane, so each iteration works
   * on its own copy and there is no race on the scratch value. */
#pragma omp parallel for simd private(tmp)
  for (int i = 0; i < len; i++)
  {
    tmp = a[i] + i;
    a[i] = tmp;
  }

  printf("a[50]=%d\n", a[50]);
  return 0;
}
|
task4.c | #include <math.h>
#include <string.h>
#include "timer.h"
#define NN 1024
#define NM 1024
float A[NN][NM];
float Anew[NN][NM];
/* Jacobi relaxation on an n x m mesh: iterate until the maximum per-cell
 * update falls below tol or iter_max sweeps have run.  Returns 0. */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;
    const double tol = 1.0e-6;
    double error = 1.0;

    memset(A, 0, n * m * sizeof(float));
    memset(Anew, 0, n * m * sizeof(float));

    /* Boundary condition: left edge held at 1.0 in both grids. */
    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

#pragma acc data copy(A), create(Anew)
    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

        /* Fix: `error` was shared by default while every thread executed
         * `error = fmax(error, ...)` unsynchronized -- a data race that
         * could under-report the residual.  reduction(max:error) gives
         * each thread a private maximum and combines them at the end. */
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
#pragma acc loop gang(8), vector(32)
            for( int i = 1; i < m-1; i++ )
            {
                /* Four-point stencil average of the neighbors. */
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }

        /* Copy the updated interior back into A for the next sweep. */
#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
#pragma acc loop gang(8), vector(32)
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
        iter++;
    }

    double runtime = GetTimer();
    printf(" total: %f s\n", runtime / 1000);
    return 0;
}
|
top_k_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename DeviceContext, typename T>
// CPU kernel for the top-k operator: for every row of the input (flattened
// so the last dimension is the "column" axis), writes the k largest values
// to "Out" and their original column indices to "Indices".
class TopkKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Get the top k elements of each row of input tensor
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    auto* indices = ctx.Output<Tensor>("Indices");
    // k is determined by Attr
    // NOTE(review): a negative attr would wrap to a huge size_t here, and
    // k > col makes the partial_sort below run past the valid range --
    // confirm k is validated upstream (1 <= k <= col).
    const size_t k = static_cast<int>(ctx.Attr<int>("k"));
    T* output_data = output->mutable_data<T>(ctx.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
    // reshape input to a flattern matrix(like flat_inner_dims)
    framework::DDim inputdims = input->dims();
    const size_t row = framework::product(
        framework::slice_ddim(inputdims, 0, inputdims.size() - 1));
    const size_t col = inputdims[inputdims.size() - 1];
    Eigen::DSizes<int, 2> flat2dims(row, col);
    // NOTE: eigen shape doesn't affect paddle tensor.
    auto eg_input = EigenMatrix<T>::Reshape(*input, inputdims.size() - 1);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
    for (size_t i = 0; i < row; i++) {
      // Pair each element with its column index so the index survives the
      // sort; vec is rebuilt per row (private per OpenMP iteration).
      std::vector<std::pair<T, size_t>> vec;
      vec.reserve(col);
      for (size_t j = 0; j < col; j++) {
        vec.push_back(std::pair<T, size_t>(eg_input(i, j), j));
      }
      // Descending partial sort: only the first k positions end up ordered.
      std::partial_sort(
          vec.begin(), vec.begin() + k, vec.end(),
          [](const std::pair<T, size_t>& l, const std::pair<T, size_t>& r) {
            return l.first > r.first;
          });
      for (size_t j = 0; j < k; j++) {
        output_data[i * k + j] = vec[j].first;
        indices_data[i * k + j] = int64_t(vec[j].second);
      }
    }
  }
};
} // namespace operators
} // namespace paddle
|
mysql_netauth_fmt_plug.c | /* Cracker for MySQL network authentication hashes. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mysqlna;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mysqlna);
#else
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024// tuned K8-dual HT
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "mysqlna"
#define FORMAT_NAME "MySQL Network Authentication"
#define FORMAT_TAG "$mysqlna$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define HEX_LENGTH 40
#define CIPHERTEXT_LENGTH 90
#define BINARY_SIZE 20
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_NONE
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests mysqlna_tests[] = {
{"$mysqlna$2D52396369653E4626293B2F75244D3871507A39*7D63098BEE381A51AA6DF11E307E46BD4F8B6E0C", "openwall"},
{"$mysqlna$615c2b5e79656f7d4931594e5b5d416c7b483365*c3a70da2874db890eb2f0a5e3ea80b2ed17da0d0", "openwall"},
{"$mysqlna$295a687c59275452214b366b39776d3f31757b2e*7343f45c94cccd646a1b29bbfad064a9ee5c0380", "overlord magnum"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
unsigned char scramble[20];
} *cur_salt;
/* One-time format initialization: scale the keys-per-crypt parameters for
 * OpenMP and allocate the candidate-key and hash-result buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	/* OMP_SCALE is a throughput tuning factor (defined near the top of
	 * this file); max_keys_per_crypt is scaled by threads * OMP_SCALE */
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/* Validate a ciphertext of the form
 *   $mysqlna$<40 hex digits (scramble)>*<40 hex digits (hash)>
 * Returns 1 on a well-formed line, 0 otherwise.
 * atoi16[c] == 0x7F marks c as a non-hex character. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	p = ciphertext + FORMAT_TAG_LEN;
	q = strstr(ciphertext, "*");
	if (!q)
		return 0;
	/* exactly HEX_LENGTH characters before the '*' separator */
	if (q - p != HEX_LENGTH)
		return 0;
	/* advance p over hex digits; p must reach q (the '*') */
	while (atoi16[ARCH_INDEX(*p)] != 0x7F && p < q)
		p++;
	if (q - p != 0)
		return 0;
	/* p now points at '*'; remainder must hold at least HEX_LENGTH chars */
	if (strlen(p) < HEX_LENGTH)
		return 0;
	/* scan the hash part after '*': all hex, exactly HEX_LENGTH long */
	q = p + 1;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p - 1 == HEX_LENGTH;
}
/* Canonicalize a ciphertext: copy into a static buffer and lower-case it
 * (the format sets FMT_SPLIT_UNIFIES_CASE). */
static char* split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	/* strncpy() does not NUL-terminate when the source fills the buffer,
	 * which would make strlwr() read past the end; terminate explicitly. */
	strncpy(out, ciphertext, sizeof(out) - 1);
	out[CIPHERTEXT_LENGTH] = 0;
	strlwr(out);
	return out;
}
/* Decode the 40-hex-digit scramble (the server challenge) into the binary
 * salt structure.  Input is assumed to have passed valid().
 * NOTE(review): returns a pointer to static storage -- the JtR core is
 * expected to copy SALT_SIZE bytes out of it (standard format pattern). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$mysqlna$" */
	p = strtokm(ctcopy, "*");
	/* two hex digits per scramble byte */
	for (i = 0; i < 20; i++)
		cs.scramble[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the hex hash (after the last '*') into a static, word-aligned
 * BINARY_SIZE-byte buffer.  Input is assumed to have passed valid(). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	char *hex;
	int i;

	hex = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		buf.c[i] = (unsigned char)((atoi16[ARCH_INDEX(hex[0])] << 4) |
		                           atoi16[ARCH_INDEX(hex[1])]);
		hex += 2;
	}
	return buf.c;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Select the salt (server scramble) used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute, for every queued candidate password, the MySQL network
 * authentication reply:
 *   stage1 = SHA1(password)
 *   inner  = SHA1(stage1)            (= the stored mysql.user hash)
 *   token  = SHA1(scramble . inner)
 *   reply  = token XOR stage1
 * The reply is what the client sends on the wire and what we compare.
 *
 * Fix: the for loop used to be compiled only under _OPENMP, so
 * non-OpenMP builds ran the body once for index 0, silently relying on
 * MAX_KEYS_PER_CRYPT == 1.  The loop is now unconditional, which is
 * equivalent when count == 1 and correct for any count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char stage1_hash[20];
		unsigned char inner_hash[20];
		unsigned char token[20];
		SHA_CTX ctx;
		int i;
		unsigned char *p = (unsigned char*)crypt_out[index];

		/* stage1 = SHA1(password) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Final(stage1_hash, &ctx);
		/* inner = SHA1(SHA1(password)) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, stage1_hash, 20);
		SHA1_Final(inner_hash, &ctx);
		/* token = SHA1(scramble . inner) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->scramble, 20);
		SHA1_Update(&ctx, inner_hash, 20);
		SHA1_Final(token, &ctx);
		/* reply = token XOR stage1 */
		for (i = 0; i < 20; i++) {
			p[i] = token[i] ^ stage1_hash[i];
		}
	}
	return count;
}
/* Quick screen over all computed results: compare only the first machine
 * word (ARCH_SIZE bytes) of each; cmp_one() verifies the full binary.
 *
 * Fix: the loop used to be compiled only under _OPENMP (non-OpenMP builds
 * checked only index 0, relying on count == 1).  It is now unconditional,
 * which is equivalent when count == 1 and correct for any count. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full BINARY_SIZE comparison for a single candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* Final verification stage; cmp_one() already compares all BINARY_SIZE
 * bytes, so there is nothing further to check here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password at position index (strnzcpy is JtR's bounded
 * copy -- presumably guarantees NUL termination; see misc.h). */
static void mysqlna_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for position index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format registration record.  The two brace groups are positional
 * initializers (params, then methods) -- field order must match
 * struct fmt_main in formats.h; do not reorder entries. */
struct fmt_main fmt_mysqlna = {
	{
		/* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		mysqlna_tests
	}, {
		/* format methods; fmt_default_* entries are stock no-ops */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		mysqlna_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
/* get_hash_* function slots are generated by the shared header */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
ams.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;
   /* raw local (on-process) data of the distributed vectors */
   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

#ifdef HYPRE_USING_UNIFIED_MEMORY
         if (sweep == 0)
         {
            /* prefetch l1 norms */
            hypre_TMemcpy(l1_norms, l1_norms, HYPRE_Real, num_rows,
                          HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_SHARED);
         }
#endif
         hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0);

         /* v = w * (f - A u) */
         hypre_ParVectorCopy(f, v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

#if defined(HYPRE_USING_CUDA)
         hypreDevice_IVAXPY(num_rows, l1_norms, v_data, u_data);
#else /* #if defined(HYPRE_USING_CUDA) */
         HYPRE_Int i;
         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
#endif
         for (i = 0; i < num_rows; i++)
         {
            u_data[i] += v_data[i] / l1_norms[i];
         }
#endif /* #if defined(HYPRE_USING_CUDA) */
         hypre_HandleCudaComputeStreamSyncPop(hypre_handle);
         hypre_SyncCudaComputeStream(hypre_handle);
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         /* Hybrid smoother: Gauss-Seidel within each processor, Jacobi
            across processor boundaries (off-diagonal part uses the u
            values communicated at the start of the sweep). */
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            /* pack the locally-owned u entries needed by neighbors */
            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               /* res = f_i - (A u)_i using the freshest local u values */
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Same as above but with over-relaxation factor omega */
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            /* dif accumulates the correction from already-updated
               entries of the current pass (v_data holds the values at
               the start of the forward pass) */
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }
         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         /* Kaczmarz: project u onto the hyperplane of each equation in
            turn, scaled by 1/l1_norm and omega.  The update spreads the
            scaled residual along row i of the local diag part. */
         /* Forward local pass */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }
         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            /* Chebyshev polynomial smoothing */
            hypre_ParCSRRelax_Cheby(A,
                                    f,
                                    max_eig_est,
                                    min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
            /* any other value: forward to BoomerAMG with |relax_type| */
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
/* Allocate and initialize a vector distributed like the rows of A,
   i.e. an element of range(A).  The vector owns its data but shares
   (does not own) the row partitioning of A. */
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *vec;

   vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                               hypre_ParCSRMatrixGlobalNumRows(A),
                               hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(vec);
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
/* Allocate and initialize a vector distributed like the columns of A,
   i.e. an element of domain(A).  The vector owns its data but shares
   (does not own) the column partitioning of A. */
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *vec;

   vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                               hypre_ParCSRMatrixGlobalNumCols(A),
                               hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(vec);
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
/* Extract the dim components x_0,...,x_{dim-1} from the interleaved
   block vector x, where &x[k] = [x_0[k],...,x_{dim-1}[k]]. */
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int d, k, n;
   HYPRE_Real *interleaved, *comp[3];

   /* local length of each component vector */
   n = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
   interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      comp[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* component d of entry k lives at interleaved[dim*k + d] */
   for (d = 0; d < dim; d++)
      for (k = 0; k < n; k++)
         comp[d][k] = interleaved[dim * k + d];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockGather
*
* Compose a parallel block vector x from dim given sub-vectors
* x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
/* Interleave the dim components x_0,...,x_{dim-1} into the block vector
   x, so that &x[k] = [x_0[k],...,x_{dim-1}[k]]. */
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int d, k, n;
   HYPRE_Real *interleaved, *comp[3];

   /* local length of each component vector */
   n = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
   interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      comp[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* component d of entry k goes to interleaved[dim*k + d] */
   for (d = 0; d < dim; d++)
      for (k = 0; k < n; k++)
         interleaved[dim * k + d] = comp[d][k];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBlockSolve
*
* Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
* Here B is a given BoomerAMG solver for A, while x and b are "block"
* parallel vectors.
*--------------------------------------------------------------------------*/
/* Apply the block-diagonal solver diag(B) to diag(A) x = b, where B is a
   BoomerAMG solver for A and x, b are interleaved "block" vectors. */
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim;
   hypre_ParVector *rhs[3];
   hypre_ParVector *sol[3];

   /* number of interleaved components per grid point */
   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   /* scalar case: solve directly */
   if (dim == 1)
   {
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   /* split into components, solve each with the same AMG hierarchy,
      then re-interleave the solution */
   for (d = 0; d < dim; d++)
   {
      rhs[d] = hypre_ParVectorInRangeOf(A);
      sol[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, rhs, dim);
   hypre_ParVectorBlockSplit(x, sol, dim);

   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, rhs[d], sol[d]);

   hypre_ParVectorBlockGather(x, sol, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(rhs[d]);
      hypre_ParVectorDestroy(sol[d]);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
/* For every (numerically) zero row of A, set the diagonal entry to 1 and
   all other entries of that row to 0. */
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real row_sum;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row counts as zero when its l1 norm does not exceed eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      /* l1 norm of row i (diag plus offd parts) */
      row_sum = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         row_sum += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            row_sum += fabs(A_offd_data[j]);

      if (row_sum > eps)
         continue;

      /* zero row: 1 on the diagonal, 0 everywhere else */
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         A_diag_data[j] = (A_diag_J[j] == i) ? 1.0 : 0.0;
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            A_offd_data[j] = 0.0;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/* Compute per-row scaling factors of A according to `option` (see the
 * banner comment above): 1 = row l1 norm, 2 = diag + offd l1 norm,
 * 3 = squared l2 norm, 4 = truncated variant of 2, 5 = raw diagonal.
 * When cf_marker is non-NULL, options 1/2/4 restrict the sums to entries
 * whose CF marker matches the row's (CF-relaxation).
 * On output *l1_norm_ptr points to a newly allocated array of num_rows
 * values (HYPRE_MEMORY_SHARED); the caller owns it.
 *
 * Fix: the option == 5 early return previously leaked cf_marker_offd
 * when cf_marker != NULL. */
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED);
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      /* NOTE(review): assumes the comm pkg exists; other routines in this
         file guard with hypre_MatvecCommPkgCreate -- confirm callers */
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   if (option == 1)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row (CSR stores it first) */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      /* squared l2 norm of each row */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* keep the signed diagonal (1.0 for zero diagonals), so the
         sign handling below is intentionally skipped */
      for (i = 0; i < num_rows; i++)
      {
         diag = A_diag_data[A_diag_I[i]];
         if (diag != 0.0) l1_norm[i] = diag;
         else l1_norm[i] = 1.0;
      }
      /* Fix: free the communicated CF markers before the early return
         (this was previously leaked when cf_marker != NULL) */
      hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];

   for (i = 0; i < num_rows; i++)
      /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
      if (fabs(l1_norm[i]) == 0.0)
      {
         hypre_error_in_arg(1);
         break;
      }

   //for (i = 0; i < num_rows; i++) l1_norm[i]=1.0/l1_norm[i];

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
/* For every row whose only entry is its diagonal element, set that
   element to d. */
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i, p;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (i = 0; i < num_rows; i++)
   {
      p = A_diag_I[i];
      /* skip rows with more than one local entry or whose single
         entry is not the diagonal */
      if (A_diag_I[i+1] != p + 1 || A_diag_J[p] != i)
         continue;
      /* skip rows that also have off-processor entries */
      if (num_cols_offd && A_offd_I[i+1] != A_offd_I[i])
         continue;
      A_diag_data[p] = d;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
hypre_AMSData *ams_data;
ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);
/* Default parameters */
ams_data -> dim = 3; /* 3D problem */
ams_data -> maxit = 20; /* perform at most 20 iterations */
ams_data -> tol = 1e-6; /* convergence tolerance */
ams_data -> print_level = 1; /* print residual norm at each step */
ams_data -> cycle_type = 1; /* a 3-level multiplicative solver */
ams_data -> A_relax_type = 2; /* offd-l1-scaled GS */
ams_data -> A_relax_times = 1; /* one relaxation sweep */
ams_data -> A_relax_weight = 1.0; /* damping parameter */
ams_data -> A_omega = 1.0; /* SSOR coefficient */
ams_data -> A_cheby_order = 2; /* Cheby: order (1 -4 are vaild) */
ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */
ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_G_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_G_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_G_theta = 0.25; /* strength threshold */
ams_data -> B_G_interp_type = 0; /* interpolation type */
ams_data -> B_G_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_Pi_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_Pi_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_Pi_theta = 0.25; /* strength threshold */
ams_data -> B_Pi_interp_type = 0; /* interpolation type */
ams_data -> B_Pi_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> beta_is_zero = 0; /* the problem has a mass term */
/* By default, do l1-GS smoothing on the coarsest grid */
ams_data -> B_G_coarse_relax_type = 8;
ams_data -> B_Pi_coarse_relax_type = 8;
/* The rest of the fields are initialized using the Set functions */
ams_data -> A = NULL;
ams_data -> G = NULL;
ams_data -> A_G = NULL;
ams_data -> B_G = 0;
ams_data -> Pi = NULL;
ams_data -> A_Pi = NULL;
ams_data -> B_Pi = 0;
ams_data -> x = NULL;
ams_data -> y = NULL;
ams_data -> z = NULL;
ams_data -> Gx = NULL;
ams_data -> Gy = NULL;
ams_data -> Gz = NULL;
ams_data -> r0 = NULL;
ams_data -> g0 = NULL;
ams_data -> r1 = NULL;
ams_data -> g1 = NULL;
ams_data -> r2 = NULL;
ams_data -> g2 = NULL;
ams_data -> Pix = NULL;
ams_data -> Piy = NULL;
ams_data -> Piz = NULL;
ams_data -> A_Pix = NULL;
ams_data -> A_Piy = NULL;
ams_data -> A_Piz = NULL;
ams_data -> B_Pix = 0;
ams_data -> B_Piy = 0;
ams_data -> B_Piz = 0;
ams_data -> interior_nodes = NULL;
ams_data -> G0 = NULL;
ams_data -> A_G0 = NULL;
ams_data -> B_G0 = 0;
ams_data -> projection_frequency = 5;
ams_data -> A_l1_norms = NULL;
ams_data -> A_max_eig_est = 0;
ams_data -> A_min_eig_est = 0;
ams_data -> owns_Pi = 1;
ams_data -> owns_A_G = 0;
ams_data -> owns_A_Pi = 0;
return (void *) ams_data;
}
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
/* Free the AMS solver structure and every object it owns.  User-supplied
 * objects registered via the Set functions (G, x, y, z, Gx, Gy, Gz, and
 * any A_G/A_Pi/Pi marked as not owned) are left alone. */
HYPRE_Int hypre_AMSDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
/* A NULL handle is a caller error, not a crash */
if (!ams_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* beta (G-space) subspace matrix/solver */
if (ams_data -> owns_A_G)
if (ams_data -> A_G)
hypre_ParCSRMatrixDestroy(ams_data -> A_G);
if (!ams_data -> beta_is_zero)
if (ams_data -> B_G)
HYPRE_BoomerAMGDestroy(ams_data -> B_G);
/* alpha (Pi-space) interpolation, subspace matrix and solver */
if (ams_data -> owns_Pi && ams_data -> Pi)
hypre_ParCSRMatrixDestroy(ams_data -> Pi);
if (ams_data -> owns_A_Pi)
if (ams_data -> A_Pi)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
if (ams_data -> B_Pi)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
/* scalar components Pix/Piy/Piz with their matrices and solvers */
if (ams_data -> owns_Pi && ams_data -> Pix)
hypre_ParCSRMatrixDestroy(ams_data -> Pix);
if (ams_data -> A_Pix)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
if (ams_data -> B_Pix)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
if (ams_data -> owns_Pi && ams_data -> Piy)
hypre_ParCSRMatrixDestroy(ams_data -> Piy);
if (ams_data -> A_Piy)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
if (ams_data -> B_Piy)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
if (ams_data -> owns_Pi && ams_data -> Piz)
hypre_ParCSRMatrixDestroy(ams_data -> Piz);
if (ams_data -> A_Piz)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
if (ams_data -> B_Piz)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
/* temporary work vectors allocated during setup */
if (ams_data -> r0)
hypre_ParVectorDestroy(ams_data -> r0);
if (ams_data -> g0)
hypre_ParVectorDestroy(ams_data -> g0);
if (ams_data -> r1)
hypre_ParVectorDestroy(ams_data -> r1);
if (ams_data -> g1)
hypre_ParVectorDestroy(ams_data -> g1);
if (ams_data -> r2)
hypre_ParVectorDestroy(ams_data -> r2);
if (ams_data -> g2)
hypre_ParVectorDestroy(ams_data -> g2);
/* NOTE(review): destroying 'A' under the G0 test looks like it is
   predicated on A having been replaced by a solver-owned, projected
   matrix whenever G0 exists (zero-conductivity handling) -- confirm
   against hypre_AMSSetup before changing this. */
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> A);
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> G0);
if (ams_data -> A_G0)
hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
if (ams_data -> B_G0)
HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
if (ams_data -> A_l1_norms)
hypre_TFree(ams_data -> A_l1_norms, HYPRE_MEMORY_SHARED);
/* G, x, y ,z, Gx, Gy and Gz are not destroyed */
/* redundant guard: ams_data was already verified non-NULL above */
if (ams_data)
hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
/* Set the problem dimension (2 or 3; default 3).
 *
 * On an invalid dimension the error is recorded via hypre_error_in_arg()
 * and the solver's previously stored dimension is left UNCHANGED (the
 * original code recorded the error but still stored the bad value,
 * corrupting the solver state). */
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   ams_data->dim = dim;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Register the user's discrete gradient matrix G with the solver.
   Must be called before hypre_AMSSetup(); the matrix is not copied. */
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Store the vertex coordinate vectors x, y, z (not copied).
   Either this or SetEdgeConstantVectors must precede hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->x = x;
   ams->y = y;
   ams->z = z;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Store the edge-basis representations Gx, Gy, Gz of the constant
   fields (1,0,0), (0,1,0), (0,0,1) (not copied).  Either this or
   SetCoordinateVectors must precede hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->Gx = Gx;
   ams->Gy = Gy;
   ams->Gz = Gz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discrete gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
/* Register user-supplied Nedelec interpolation matrices.  Since they
   come from the caller, AMS no longer owns (or frees) Pi/Pix/Piy/Piz. */
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->Pi  = Pi;
   ams->Pix = Pix;
   ams->Piy = Piy;
   ams->Piz = Piz;

   /* caller retains ownership of all four matrices */
   ams->owns_Pi = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
/* Register the alpha-Poisson matrix A_Pi (coarse solver on Range(Pi^T)).
   If never called, Pi^T A Pi is built in hypre_AMSSetup() instead. */
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->A_Pi = A_Pi;

   /* Rows holding only a diagonal entry correspond to eliminated dofs;
      give them a huge diagonal so they behave as identity rows. */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Optionally make the diagonal the first entry of each row:
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
/* Register the beta-Poisson matrix A_G.  Passing NULL declares beta == 0,
   which switches AMS to a two-level (instead of three-level) method. */
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->A_G = A_G;

   if (A_G == NULL)
   {
      ams->beta_is_zero = 1;
      return hypre_error_flag;
   }

   /* Penalize the eliminated degrees of freedom. */
   hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

   /* Optionally make the diagonal the first entry of each row:
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInteriorNodes
*
* Set the list of nodes which are interior to the zero-conductivity region.
* A node is interior if interior_nodes[i] == 1.0.
*
* Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Register the indicator vector of nodes interior to the
   zero-conductivity region (entry == 1.0 means interior).
   Call before hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->interior_nodes = interior_nodes;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetProjectionFrequency
*
* How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
* when iterating with the solver.
*
* The default value is every 5th iteration.
*--------------------------------------------------------------------------*/
/* Set how often (in iterations) the r.h.s. is projected onto the
   compatible subspace Ker(G0^T).  Default: every 5th iteration. */
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->projection_frequency = projection_frequency;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetMaxIter
*
* Set the maximum number of iterations in the three-level method.
* The default value is 20. To use the AMS solver as a preconditioner,
* set maxit to 1, tol to 0.0 and print_level to 0.
*--------------------------------------------------------------------------*/
/* Set the iteration cap for the three-level method (default 20).
   For preconditioner use: maxit = 1, tol = 0.0, print_level = 0. */
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
                              HYPRE_Int maxit)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->maxit = maxit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetTol
*
* Set the convergence tolerance (if the method is used as a solver).
* The default value is 1e-6.
*--------------------------------------------------------------------------*/
/* Set the convergence tolerance used in solver mode (default 1e-6). */
HYPRE_Int hypre_AMSSetTol(void *solver,
                          HYPRE_Real tol)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCycleType
*
* Choose which three-level solver to use. Possible values are:
*
* 1 = 3-level multipl. solver (01210) <-- small solution time
* 2 = 3-level additive solver (0+1+2)
* 3 = 3-level multipl. solver (02120)
* 4 = 3-level additive solver (010+2)
* 5 = 3-level multipl. solver (0102010) <-- small solution time
* 6 = 3-level additive solver (1+020)
* 7 = 3-level multipl. solver (0201020) <-- small number of iterations
* 8 = 3-level additive solver (0(1+2)0) <-- small solution time
* 9 = 3-level multipl. solver (01210) with discrete divergence
* 11 = 5-level multipl. solver (013454310) <-- small solution time, memory
* 12 = 5-level additive solver (0+1+3+4+5)
* 13 = 5-level multipl. solver (034515430) <-- small solution time, memory
* 14 = 5-level additive solver (01(3+4+5)10)
* 20 = 2-level multipl. solver (0[12]0)
*
* 0 = a Hiptmair-like smoother (010)
*
* The default value is 1.
*--------------------------------------------------------------------------*/
/* Select the multilevel cycle (see the table in the comment block above
   for the meaning of each value; default 1). */
HYPRE_Int hypre_AMSSetCycleType(void *solver,
                                HYPRE_Int cycle_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->cycle_type = cycle_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetPrintLevel
*
* Control how much information is printed during the solution iterations.
* The default value is 1 (print residual norm at each step).
*--------------------------------------------------------------------------*/
/* Control iteration verbosity (default 1: print residual per step). */
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
                                 HYPRE_Int print_level)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->print_level = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetSmoothingOptions
*
* Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
*--------------------------------------------------------------------------*/
/* Configure the relaxation (smoother) applied to A.
   Defaults: type 2, 1 sweep, weight 1.0, omega 1.0. */
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->A_relax_type   = A_relax_type;
   ams->A_relax_times  = A_relax_times;
   ams->A_relax_weight = A_relax_weight;
   ams->A_omega        = A_omega;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetChebySmoothingOptions
* AB: note: this could be added to the above,
* but I didn't want to change parameter list)
* Set parameters for chebyshev smoother for A. Default values: 2,.3.
*--------------------------------------------------------------------------*/
/* Configure the Chebyshev smoother for A.  Defaults: order 2, fraction .3.
 *
 * BUG(review): A_cheby_fraction is declared HYPRE_Int here, yet the field
 * it stores holds a fractional value (default .3 in hypre_AMSCreate).
 * Any fractional argument a caller passes is truncated to 0 at the call
 * boundary.  The parameter should almost certainly be HYPRE_Real; fixing
 * it requires a matching change in the public header declaration. */
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Int A_cheby_fraction)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_cheby_order = A_cheby_order;
ams_data -> A_cheby_fraction = A_cheby_fraction;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGOptions
*
* Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
/* Configure BoomerAMG for the B_Pi (alpha) subspace solver.
   Defaults: 10, 1, 3, 0.25, 0, 0. */
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams->B_Pi_agg_levels   = B_Pi_agg_levels;
   ams->B_Pi_relax_type   = B_Pi_relax_type;
   ams->B_Pi_theta        = B_Pi_theta;
   ams->B_Pi_interp_type  = B_Pi_interp_type;
   ams->B_Pi_Pmax         = B_Pi_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
*--------------------------------------------------------------------------*/
/* Select the coarsest-level relaxation for the B_Pi AMG (default 8). */
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGOptions
*
* Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
/* Configure BoomerAMG for the B_G (beta) subspace solver.
   Defaults: 10, 1, 3, 0.25, 0, 0. */
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_G_coarsen_type = B_G_coarsen_type;
   ams->B_G_agg_levels   = B_G_agg_levels;
   ams->B_G_relax_type   = B_G_relax_type;
   ams->B_G_theta        = B_G_theta;
   ams->B_G_interp_type  = B_G_interp_type;
   ams->B_G_Pmax         = B_G_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_G. Default value: 8.
*--------------------------------------------------------------------------*/
/* Select the coarsest-level relaxation for the B_G AMG (default 8). */
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePi
*
* Construct the Pi interpolation matrix, which maps the space of vector
* linear finite elements to the space of edge finite elements.
*
* The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
* where each block has the same sparsity structure as G, and the entries
* can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
/* Build the monolithic interpolation Pi = [Pi_x, Pi_y, Pi_z], which has
 * the sparsity of G with each entry expanded into 'dim' consecutive
 * columns; entries are |G_ij| * 0.5 * G{x,y,z}_data[i].  The result is
 * returned through Pi_ptr; the caller owns it.
 * Gz may be NULL when dim == 2 (it is only read when dim == 3). */
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pi_ptr)
{
hypre_ParCSRMatrix *Pi;
/* Compute Pi = [Pi_x, Pi_y, Pi_z] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* Pi has the rows of G but dim times as many columns/nonzeros */
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts;
HYPRE_Int col_starts_size;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_starts_size = 2;
#else
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
col_starts_size = num_procs+1;
#endif
/* Pi's column partitioning is G's scaled by dim; Pi owns this array
   (OwnsColStarts = 1 below), so it will be freed with Pi. */
col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];
Pi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
hypre_ParCSRMatrixInitialize(Pi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);
/* each G entry becomes dim consecutive Pi entries */
for (i = 0; i < G_diag_nrows+1; i++)
Pi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
/* write values in (x, y[, z]) order via a bumped pointer -- note the
   ordering of the three stores is significant */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);
/* row pointers are only meaningful when the offd block has columns */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
Pi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* expand the off-diagonal column map the same way as the columns */
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
}
}
*Pi_ptr = Pi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePixyz
*
* Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
* which maps the space of vector linear finite elements to the space of
* edge finite elements.
*
* The construction is based on the fact that each component has the same
* sparsity structure as G, and the entries can be computed from the vectors
* Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
/* Build the scalar components Pix, Piy (and Piz when dim == 3) of the
 * Nedelec interpolation.  Each component has exactly G's sparsity; the
 * entries are |G_ij| * 0.5 * G{x,y,z}_data[i].  The results are returned
 * through the *_ptr arguments and are owned by the caller.
 * When dim == 2, Gz may be NULL and *Piz_ptr is left untouched. */
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pix_ptr,
hypre_ParCSRMatrix **Piy_ptr,
hypre_ParCSRMatrix **Piz_ptr)
{
hypre_ParCSRMatrix *Pix, *Piy, *Piz;
/* Compute Pix, Piy, Piz */
{
HYPRE_Int i, j;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* each component matrix has exactly G's dimensions and pattern */
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
/* the components share G's row/col partitioning, so they own neither */
Pix = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
hypre_ParCSRMatrixInitialize(Pix);
Piy = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piy) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
hypre_ParCSRMatrixInitialize(Piy);
if (dim == 3)
{
Piz = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piz) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
hypre_ParCSRMatrixInitialize(Piz);
}
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);
/* all three components copy G's row pointers and column indices */
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
Piz_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
Piz_diag_J[i] = G_diag_J[i];
}
/* values written via bumped pointers, one per component per G entry */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
*Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
else
{
/* dim == 2: same as above but without the z component */
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
}
}
/* Fill-in the off-diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);
/* row pointers are only meaningful when the offd block has columns */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
Piz_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
Piz_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
*Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* the components keep G's off-diagonal column map unchanged */
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
Piz_cmap[i] = G_cmap[i];
}
}
else
{
/* dim == 2: same as above but without the z component */
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
}
}
}
*Pix_ptr = Pix;
*Piy_ptr = Piy;
/* in 2D, Piz was never created and *Piz_ptr is deliberately untouched */
if (dim == 3)
*Piz_ptr = Piz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputeGPi
*
* Construct the matrix [G,Pi] which can be considered an interpolation
* matrix from S_h^4 (4 copies of the scalar linear finite element space)
* to the edge finite elements space.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G: from here on 'dim' counts the G block too,
      i.e. dim == 3 for a 2D problem and dim == 4 for a 3D problem. */
   dim++;

   /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      /* GPi has the same rows as G, but dim-times as many columns:
         each column of G is expanded into a block of dim columns. */
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* Column partitioning of GPi is the column partitioning of G
         scaled by dim; GPi owns this array (see OwnsColStarts below). */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      /* row_starts are borrowed from G, col_starts were allocated here */
      hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;

      hypre_ParCSRMatrixInitialize(GPi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz_data is only read below when dim == 4, which is the only case
         in which it is assigned (a 3D problem with a valid Gz) */
      if (dim == 4)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);

         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

         /* each row of GPi has dim-times as many nonzeros as in G */
         for (i = 0; i < G_diag_nrows+1; i++)
            GPi_diag_I[i] = dim * G_diag_I[i];

         /* column j of G maps to the block of columns dim*j .. dim*j+dim-1 */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* interleave the values: [G, Pi_x, Pi_y, (Pi_z)] per entry of G;
            the sequential *ptr++ writes rely on the I/J layout built above */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *GPi_diag_data++ = G_diag_data[j];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);

         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

         /* the I array is only meaningful when there are off-diag columns;
            the nnz loops below are vacuous in that case (G_offd_nnz == 0) */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               GPi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         /* same interleaved fill as in the diagonal part */
         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *GPi_offd_data++ = G_offd_data[j];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* expand the off-diagonal global column map block-wise */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
      }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
 * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors()
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* input_info: 1 = vertex coordinates (x,y,z) were given,
                  2 = edge constant vectors (Gx,Gy,Gz) were given */
   HYPRE_Int input_info = 0;

   ams_data -> A = A;

   /* Modifications for problems with zero-conductivity regions */
   if (ams_data -> interior_nodes)
   {
      hypre_ParCSRMatrix *G0t, *Aorig = A;

      /* Make sure that multiple Setup()+Solve() give identical results */
      ams_data -> solve_counter = 0;

      /* Construct the discrete gradient matrix for the zero-conductivity region
         by eliminating the zero-conductivity nodes from G^t. The range of G0
         represents the kernel of A, i.e. the gradients of nodal basis functions
         supported in zero-conductivity regions. */
      hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
      {
         HYPRE_Int i, j;
         HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
         hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
         HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
         HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
         hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
         HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
         HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
         HYPRE_Real *interior_nodes_data=hypre_VectorData(
            hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));

         /* zero out the rows of G^t corresponding to nodes NOT marked as
            interior (interior_nodes entry != 1) */
         for (i = 0; i < nv; i++)
         {
            if (interior_nodes_data[i] != 1)
            {
               for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
                  G0tdA[j] = 0.0;
               if (G0toI)
                  for (j = G0toI[i]; j < G0toI[i+1]; j++)
                     G0toA[j] = 0.0;
            }
         }
      }
      hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);

      /* Construct the subspace matrix A_G0 = G0^T G0 */
      ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);

      /* Create AMG solver for A_G0 */
      HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetup(ams_data -> B_G0,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G0,
                           0, 0);

      /* Construct the preconditioner for ams_data->A = A + G0 G0^T.
         NOTE: this can be optimized significantly by taking into account that
         the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
      {
         hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
         hypre_ParCSRMatrix *B = Aorig;
         hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
         hypre_ParCSRMatrix *C;
         HYPRE_Real factor, lfactor;

         /* scale (penalize) G0 G0^T before adding it to the matrix */
         {
            HYPRE_Int i;
            HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
            HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
            HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
            HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
            HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));

            /* local maximum |A_ij| over diag and offd parts */
            lfactor = -1;
            for (i = 0; i < B_diag_i[B_num_rows]; i++)
               if (fabs(B_diag_data[i]) > lfactor)
                  lfactor = fabs(B_diag_data[i]);
            for (i = 0; i < B_offd_i[B_num_rows]; i++)
               if (fabs(B_offd_data[i]) > lfactor)
                  lfactor = fabs(B_offd_data[i]);
            lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */

            /* global reduction so every rank uses the same penalty factor */
            hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                                hypre_ParCSRMatrixComm(A));
         }

         hypre_ParcsrAdd(factor, A, 1.0, B, &C);

         /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;

         MPI_Comm comm = hypre_ParCSRMatrixComm(A);
         HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
         HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
         HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
         HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
         HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
         HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
         HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
         HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
         A_local = hypre_MergeDiagAndOffd(A);
         B_local = hypre_MergeDiagAndOffd(B);*/
         /* scale (penalize) G0 G0^T before adding it to the matrix */
         /*{
            HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
            HYPRE_Real *data = hypre_CSRMatrixData(A_local);
            HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
            HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
            HYPRE_Real factor, lfactor;
            lfactor = -1;
            for (i = 0; i < nnzB; i++)
               if (fabs(dataB[i]) > lfactor)
                  lfactor = fabs(dataB[i]);
            lfactor *= 1e-10;
            hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                                hypre_ParCSRMatrixComm(A));
            for (i = 0; i < nnz; i++)
               data[i] *= factor;
         }
         C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
         C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
         if (C_local)
            hypre_CSRMatrixDestroy(C_tmp);
         else
            C_local = C_tmp;

         C = hypre_ParCSRMatrixCreate (comm,
                                       global_num_rows,
                                       global_num_cols,
                                       row_starts,
                                       col_starts,
                                       A_num_cols_offd + B_num_cols_offd,
                                       A_num_nonzeros_diag + B_num_nonzeros_diag,
                                       A_num_nonzeros_offd + B_num_nonzeros_offd);
         GenerateDiagAndOffd(C_local, C,
                             hypre_ParCSRMatrixFirstColDiag(A),
                             hypre_ParCSRMatrixLastColDiag(A));

         hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
         hypre_ParCSRMatrixOwnsColStarts(C) = 1;
         hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;

         hypre_CSRMatrixDestroy(A_local);
         hypre_CSRMatrixDestroy(B_local);
         hypre_CSRMatrixDestroy(C_local);
         */

         hypre_ParCSRMatrixDestroy(A);

         *C_ptr = C;
      }

      hypre_ParCSRMatrixDestroy(G0t);
   }

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */

   /* Compute the l1 norm of the rows of A (used by the l1-smoothers) */
   if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
      hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type,
                                 NULL, &ams_data -> A_l1_norms);

   /* Chebyshev? Estimate the eigenvalue range needed by the smoother. */
   if (ams_data -> A_relax_type == 16)
   {
      hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
                                   &ams_data->A_max_eig_est,
                                   &ams_data->A_min_eig_est);
   }

   /* If not given, compute Gx, Gy and Gz from the vertex coordinates */
   {
      if (ams_data -> x != NULL && ams_data -> y != NULL &&
          (ams_data -> dim == 2 || ams_data -> z != NULL))
         input_info = 1;

      if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
          (ams_data -> dim == 2 || ams_data -> Gz != NULL))
         input_info = 2;

      if (input_info == 1)
      {
         /* edge-constant vectors are the discrete gradients of the
            coordinate functions: G{x,y,z} = G {x,y,z} */
         ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
         ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
         if (ams_data -> dim == 3)
         {
            ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
            hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
         }
      }
   }

   /* Construct the interpolation operator(s), depending on the cycle type */
   if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
   {
      if (ams_data -> cycle_type == 20)
         /* Construct the combined interpolation matrix [G,Pi] */
         hypre_AMSComputeGPi(ams_data -> A,
                             ams_data -> G,
                             ams_data -> Gx,
                             ams_data -> Gy,
                             ams_data -> Gz,
                             ams_data -> dim,
                             &ams_data -> Pi);
      else if (ams_data -> cycle_type > 10)
         /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
         hypre_AMSComputePixyz(ams_data -> A,
                               ams_data -> G,
                               ams_data -> Gx,
                               ams_data -> Gy,
                               ams_data -> Gz,
                               ams_data -> dim,
                               &ams_data -> Pix,
                               &ams_data -> Piy,
                               &ams_data -> Piz);
      else
         /* Construct the Pi interpolation matrix */
         hypre_AMSComputePi(ams_data -> A,
                            ams_data -> G,
                            ams_data -> Gx,
                            ams_data -> Gy,
                            ams_data -> Gz,
                            ams_data -> dim,
                            &ams_data -> Pi);
   }

   /* Keep Gx, Gy and Gz only if use the method with discrete divergence
      stabilization (where we use them to compute the local mesh size). */
   if (input_info == 1 && ams_data -> cycle_type != 9)
   {
      hypre_ParVectorDestroy(ams_data -> Gx);
      hypre_ParVectorDestroy(ams_data -> Gy);
      if (ams_data -> dim == 3)
         hypre_ParVectorDestroy(ams_data -> Gz);
   }

   /* Create the AMG solver on the range of G^T */
   if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_G);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);

      /* If not given, construct the coarse space matrix by RAP */
      if (!ams_data -> A_G)
      {
         HYPRE_Int G_owned_col_starts;

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
            hypre_MatvecCommPkgCreate(ams_data -> G);

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
            hypre_MatvecCommPkgCreate(ams_data -> A);

         /* save/restore the ownership flag around the RAP, which may
            transfer col_starts ownership to the product */
         G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);

         hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
                                            ams_data -> A,
                                            ams_data -> G,
                                            &ams_data -> A_G);

         /* Make sure that A_G has no zero rows (this can happen
            if beta is zero in part of the domain). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;

         ams_data -> owns_A_G = 1;
      }

      HYPRE_BoomerAMGSetup(ams_data -> B_G,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G,
                           0, 0);
   }

   if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
      /* Create the AMG solvers on the range of Pi{x,y,z}^T */
   {
      HYPRE_Int P_owned_col_starts;

      HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);

      HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);

      HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);

      /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
      {
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
      }

      /* Construct the coarse space matrices by RAP */
      if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
         hypre_MatvecCommPkgCreate(ams_data -> Pix);
      P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
      hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
                                         ams_data -> A,
                                         ams_data -> Pix,
                                         &ams_data -> A_Pix);
      if (!P_owned_col_starts)
      {
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
      }

      /* Make sure that A_Pix has no zero rows (this can happen
         for some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);

      HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pix,
                           0, 0);

      if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
         hypre_MatvecCommPkgCreate(ams_data -> Piy);
      P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
      hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
                                         ams_data -> A,
                                         ams_data -> Piy,
                                         &ams_data -> A_Piy);
      if (!P_owned_col_starts)
      {
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
      }

      /* Make sure that A_Piy has no zero rows (this can happen
         for some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);

      HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Piy,
                           0, 0);

      /* Piz exists only for 3D problems */
      if (ams_data -> Piz)
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
            hypre_MatvecCommPkgCreate(ams_data -> Piz);
         P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
         hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
                                            ams_data -> A,
                                            ams_data -> Piz,
                                            &ams_data -> A_Piz);
         if (!P_owned_col_starts)
         {
            hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
            hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
         }

         /* Make sure that A_Piz has no zero rows (this can happen
            for some kinds of boundary conditions with contact). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);

         HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
                              (HYPRE_ParCSRMatrix)ams_data -> A_Piz,
                              0, 0);
      }
   }
   else
      /* Create the AMG solver on the range of Pi^T */
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);

      /* If not given, construct the coarse space matrix by RAP and
         notify BoomerAMG that this is a dim x dim block system. */
      if (!ams_data -> A_Pi)
      {
         HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
            hypre_MatvecCommPkgCreate(ams_data -> Pi);

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
            hypre_MatvecCommPkgCreate(ams_data -> A);

         if (ams_data -> cycle_type == 9)
         {
            /* Add a discrete divergence term to A before computing Pi^t A Pi */
            {
               hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
               hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
               hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
               hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;

               /* scale GGt by h^2 */
               {
                  HYPRE_Real h2;
                  HYPRE_Int i, j, k, ne;

                  hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
                  HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
                  HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
                  HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
                  HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);

                  hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
                  HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
                  HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);

                  HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
                  HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
                  HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));

                  for (i = 0; i < Gt_num_rows; i++)
                  {
                     /* determine the characteristic mesh size for vertex i:
                        average squared length of the incident (local) edges */
                     h2 = 0.0;
                     ne = 0;
                     for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
                     {
                        k = Gt_diag_J[j];
                        h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
                        ne++;
                     }

                     if (ne != 0)
                     {
                        h2 /= ne;
                        for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
                           Gt_diag_data[j] *= h2;
                        for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
                           Gt_offd_data[j] *= h2;
                     }
                  }
               }

               /* we only needed Gx, Gy and Gz to compute the local mesh size */
               if (input_info == 1)
               {
                  hypre_ParVectorDestroy(ams_data -> Gx);
                  hypre_ParVectorDestroy(ams_data -> Gy);
                  if (ams_data -> dim == 3)
                     hypre_ParVectorDestroy(ams_data -> Gz);
               }

               GGt = hypre_ParMatmul(ams_data -> G, Gt);
               hypre_ParCSRMatrixDestroy(Gt);

               /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
               hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
               /*{
                  hypre_ParCSRMatrix *A = GGt;
                  hypre_ParCSRMatrix *B = ams_data -> A;
                  hypre_ParCSRMatrix **C_ptr = &ApGGt;

                  hypre_ParCSRMatrix *C;
                  hypre_CSRMatrix *A_local, *B_local, *C_local;

                  MPI_Comm comm = hypre_ParCSRMatrixComm(A);
                  HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
                  HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
                  HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
                  HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
                  HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
                  HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
                  HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
                  HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));

                  A_local = hypre_MergeDiagAndOffd(A);
                  B_local = hypre_MergeDiagAndOffd(B);
                  C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
                  hypre_CSRMatrixBigJtoJ(C_local);

                  C = hypre_ParCSRMatrixCreate (comm,
                                                global_num_rows,
                                                global_num_cols,
                                                row_starts,
                                                col_starts,
                                                A_num_cols_offd + B_num_cols_offd,
                                                A_num_nonzeros_diag + B_num_nonzeros_diag,
                                                A_num_nonzeros_offd + B_num_nonzeros_offd);
                  GenerateDiagAndOffd(C_local, C,
                                      hypre_ParCSRMatrixFirstColDiag(A),
                                      hypre_ParCSRMatrixLastColDiag(A));

                  hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
                  hypre_ParCSRMatrixOwnsColStarts(C) = 0;

                  hypre_CSRMatrixDestroy(A_local);
                  hypre_CSRMatrixDestroy(B_local);
                  hypre_CSRMatrixDestroy(C_local);

                  *C_ptr = C;
               }*/

               hypre_ParCSRMatrixDestroy(GGt);

               hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                                  ApGGt,
                                                  ams_data -> Pi,
                                                  &ams_data -> A_Pi);
            }
         }
         else
         {
            hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                               ams_data -> A,
                                               ams_data -> Pi,
                                               &ams_data -> A_Pi);
         }

         if (!P_owned_col_starts)
         {
            hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
            hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
         }

         ams_data -> owns_A_Pi = 1;

         /* A_Pi is a block system: dim unknowns per node, or dim+1 for
            the combined [G,Pi] space used by cycle type 20 */
         if (ams_data -> cycle_type != 20)
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
         else
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
         /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
      }

      /* Make sure that A_Pi has no zero rows (this can happen for
         some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);

      HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pi,
                           0, 0);
   }

   /* Allocate temporary vectors used by the subspace corrections */
   ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
   ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
   if (ams_data -> A_G)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
   }
   if (ams_data -> r1 == NULL && ams_data -> A_Pix)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
   }
   if (ams_data -> Pi)
   {
      ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
      ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   /* r_norm/r0_norm/b_norm are only computed (and only read) when
      ams_data->maxit > 1; relative_resid is initialized for the
      maxit == 1 case, where the convergence test below is skipped */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;

   /* the cycle string is interpreted by hypre_ParCSRSubspacePrec:
      '0' = fine-grid smoothing, '1'..'5' = correction in subspace i-1,
      '(' = save residual, '+' = additive correction, ')' = no-op */
   char cycle[30];

   /* subspace data indexed by cycle character - '1':
      0 = G (gradients), 1 = Pi (nodal block), 2/3/4 = Pix/Piy/Piz */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];

   hypre_ParVector *z = NULL;

   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   /* the Pi space is a block system, hence the block solver */
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* Pix/Piy/Piz corrections reuse the r1/g1 temporaries */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Select the cycle string; when beta == 0 the gradient subspace
      correction ('1') is omitted entirely */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf("                                            relative\n");
            hypre_printf("               residual        factor       residual\n");
            hypre_printf("               --------        ------       --------\n");
            hypre_printf("    Initial    %e                 %e\n",
                         r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf("    Cycle %2d   %e    %f     %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   if (z)
      hypre_ParVectorDestroy(z);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         /* FIX: previously a cycle character below '1' only flagged the
            error and then fell through to index A[i] with a negative i
            (out-of-bounds read, undefined behavior). Characters above '5'
            similarly indexed past the five-entry subspace arrays. Validate
            the index and bail out before touching A[i]. */
         if (i < 0 || i > 4)
         {
            hypre_error_in_arg(16);
            return hypre_error_flag;
         }

         /* skip empty subspaces */
         if (!A[i]) continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive mode: restrict the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative mode: restrict the current residual x - Ay */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* approximately solve the subspace problem and prolongate */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
/* Report the iteration count recorded by the most recent AMS solve. */
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *num_iterations = data->num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
/* Report the relative residual norm recorded by the most recent AMS solve. */
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *rel_resid_norm = data->rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
/* Project x onto the complement of the range of G0:
   x <- (I - G0 (G0^t G0)^{-1} G0^t) x.
   A no-op when no zero-conductivity solver (B_G0) was set up. */
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   if (!(data->B_G0))
   {
      return hypre_error_flag;
   }

   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, data->G0, x, 0.0, data->r1);
   /* g1 = (G0^t G0)^{-1} r1, solved by AMG with a zero initial guess */
   hypre_ParVectorSetConstantValues(data->g1, 0.0);
   hypre_BoomerAMGSolve(data->B_G0, data->A_G0, data->r1, data->g1);
   /* x -= G0 g1 */
   hypre_ParCSRMatrixMatvec(1.0, data->G0, data->g1, 0.0, data->g0);
   hypre_ParVectorAxpy(-1.0, data->g0, x);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
* - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
* and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
hypre_ParVector *x_coord,
HYPRE_BigInt *edge_vertex,
HYPRE_Int edge_orientation,
hypre_ParCSRMatrix **G_ptr)
{
hypre_ParCSRMatrix *G;
HYPRE_Int nedges;
/* one row of G per locally owned edge (rows of the edge matrix A) */
nedges = hypre_ParCSRMatrixNumRows(A);
/* Construct the local part of G based on edge_vertex and the edge
and vertex partitionings from A and x_coord */
{
/* every edge row holds exactly two entries: the +/-1 endpoint weights */
HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
HYPRE_Int part_size;
HYPRE_BigInt *row_starts, *col_starts;
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
hypre_ParVectorGlobalSize(x_coord),
2*nedges);
/* row pointers: row i spans entries [2*i, 2*i+2) */
for (i = 0; i <= nedges; i++)
I[i] = 2*i;
if (edge_orientation == 1)
{
/* Assume that the edges are already oriented */
for (i = 0; i < 2*nedges; i+=2)
{
data[i] = -1.0;
data[i+1] = 1.0;
}
}
else if (edge_orientation == 2)
{
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*nedges; i+=2)
{
if (edge_vertex[i] < edge_vertex[i+1])
{
data[i] = -1.0;
data[i+1] = 1.0;
}
else
{
data[i] = 1.0;
data[i+1] = -1.0;
}
}
}
else
/* any other orientation flag is an input error */
hypre_error_in_arg(4);
/* Attach the arrays to the local CSR matrix.  OwnsData==1 means the
hypre_CSRMatrixDestroy call below releases them.  NOTE(review):
edge_vertex becomes the (big) column array of 'local' and appears to
be consumed here -- confirm callers do not reuse it afterwards. */
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = nedges;
/* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
part_size = 2;
#else
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
part_size++;
#endif
row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
for (i = 0; i < part_size; i++)
{
row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
}
/* Generate the discrete gradient matrix */
G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParVectorGlobalSize(x_coord),
row_starts, col_starts, 0, 0, 0);
/* G takes ownership of the partitioning arrays allocated just above */
hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
/* convert global (big) column indices to local ones before splitting
the local CSR into the diag/offd parts of G */
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G,
hypre_ParVectorFirstIndex(x_coord),
hypre_ParVectorLastIndex(x_coord));
/* Account for empty rows in G. These may appear when A includes only
the interior (non-Dirichlet b.c.) edges. */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
}
/* Free the local matrix */
hypre_CSRMatrixDestroy(local);
}
*G_ptr = G;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEISetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x,
HYPRE_Int num_vert,
HYPRE_Int num_local_vert,
HYPRE_BigInt *vert_number,
HYPRE_Real *vert_coord,
HYPRE_Int num_edges,
HYPRE_BigInt *edge_vertex)
{
/* NOTE(review): parameters b and x are not used in this body -- presumably
kept for interface symmetry with hypre_AMSSetup(); confirm. */
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int i, j;
hypre_ParCSRMatrix *G;
hypre_ParVector *x_coord, *y_coord, *z_coord;
HYPRE_Real *x_data, *y_data, *z_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt *vert_part, num_global_vert;
HYPRE_BigInt vert_start, vert_end;
HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;
/* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* two-entry partition: [my first global vertex, one past my last] */
vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
vert_part[0] = vert_part[1] - big_local_vert;
hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
#else
/* full partition array: prefix sums of the per-processor vertex counts */
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT, &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm);
vert_part[0] = 0;
for (i = 0; i < num_procs; i++)
vert_part[i+1] += vert_part[i];
num_global_vert = vert_part[num_procs];
#endif
/* Construct hypre parallel vectors for the vertex coordinates.
All three share vert_part (hence OwnsPartitioning == 0). */
x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(x_coord);
hypre_ParVectorOwnsData(x_coord) = 1;
hypre_ParVectorOwnsPartitioning(x_coord) = 0;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));
y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(y_coord);
hypre_ParVectorOwnsData(y_coord) = 1;
hypre_ParVectorOwnsPartitioning(y_coord) = 0;
y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));
z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(z_coord);
hypre_ParVectorOwnsData(z_coord) = 1;
hypre_ParVectorOwnsPartitioning(z_coord) = 0;
z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));
vert_start = hypre_ParVectorFirstIndex(x_coord);
vert_end = hypre_ParVectorLastIndex(x_coord);
/* Save coordinates of locally owned vertices (vert_coord is packed as
x,y,z triples; vertices in the processor but owned elsewhere are skipped) */
for (i = 0; i < num_vert; i++)
{
if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
{
j = (HYPRE_Int)(vert_number[i] - vert_start);
x_data[j] = vert_coord[3*i];
y_data[j] = vert_coord[3*i+1];
z_data[j] = vert_coord[3*i+2];
}
}
/* Change vertex numbers from local to global.
NOTE(review): this rewrites the caller's edge_vertex array in place. */
for (i = 0; i < 2*num_edges; i++)
edge_vertex[i] = vert_number[edge_vertex[i]];
/* Construct the local part of G based on edge_vertex */
{
/* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
num_global_vert,
2*num_edges);
/* two entries per edge row */
for (i = 0; i <= num_edges; i++)
I[i] = 2*i;
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*num_edges; i+=2)
{
data[i] = 1.0;
data[i+1] = -1.0;
}
/* local takes ownership of I/data (and uses edge_vertex as its big-J) */
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = num_edges;
/* G reuses A's row partition (not owned) and takes over vert_part */
G = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
num_global_vert,
hypre_ParCSRMatrixRowStarts(A),
vert_part,
0, 0, 0);
hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
/* convert global column indices to local before splitting diag/offd */
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G, vert_start, vert_end);
//hypre_CSRMatrixJ(local) = NULL;
hypre_CSRMatrixDestroy(local);
}
/* hand the constructed objects to the AMS data structure */
ams_data -> G = G;
ams_data -> x = x_coord;
ams_data -> y = y_coord;
ams_data -> z = z_coord;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
/* Release the objects created by hypre_AMSFEISetup() (discrete gradient
   and the three coordinate vectors); each is destroyed only if present. */
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   if (data->G)
   {
      hypre_ParCSRMatrixDestroy(data->G);
   }
   if (data->x)
   {
      hypre_ParVectorDestroy(data->x);
   }
   if (data->y)
   {
      hypre_ParVectorDestroy(data->y);
   }
   if (data->z)
   {
      hypre_ParVectorDestroy(data->z);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1NormsThreads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/*
 * Compute per-row l1-type norms of A, used as diagonal scalings in the
 * l1-smoothers.  The rows are statically partitioned over num_threads
 * OpenMP threads; "off-thread" below means outside a thread's [ns,ne) block.
 *
 *  option 1 : l1 norm of the whole row
 *  option 2 : diagonal plus l1 norm of the off-thread/off-processor part
 *  option 3 : squared l2 norm of the row
 *  option 4 : truncated variant of option 2 (Remark 6.2 in "Multigrid
 *             Smoothers for Ultra-Parallel Computing")
 *
 * When cf_marker != NULL, sums are restricted to columns carrying the same
 * CF marker as the row; off-processor markers are exchanged first.
 *
 * The result array is allocated here and returned via l1_norm_ptr (caller
 * owns it).  Rows whose first stored entry is negative get a negated norm
 * (NOTE: this assumes the diagonal is stored first, the usual ParCSR
 * convention), and a zero norm raises a hypre error.
 */
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED);
   HYPRE_Int ii, ns, ne, rest, size;
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      /* pack the local markers that neighboring processors need */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* static row partition [ns,ne) for thread k; the first 'rest'
         threads get one extra row */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* squared l2 norm of the full row (no CF variant) */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* FIX: initialize diag for every row.  Previously diag was only
               assigned when the row contained a diagonal entry, so the
               truncation test below could read an uninitialized or stale
               value for rows without one. */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }

      /* Handle negative definite matrices: assumes the diagonal entry is
         stored first in each row (ParCSR convention) */
      for (i = ns; i < ne; i++)
         if (A_diag_data[A_diag_I[i]] < 0)
            l1_norm[i] = -l1_norm[i];

      /* a vanishing norm would divide by zero in the smoother */
      for (i = ns; i < ne; i++)
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelaxThreads
* 1 = l1-scaled Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int relax_type,
HYPRE_Int relax_times,
HYPRE_Real *l1_norms,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *z)
{
/* Threaded l1-smoother for A u = f:
relax_type 1 = l1-scaled Jacobi, 2 = l1-scaled hybrid (block) GS/SSOR.
NOTE(review): relax_times and z are not referenced in this body, and
my_id is computed but unused -- presumably kept for interface symmetry
with hypre_ParCSRRelax(); confirm. */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
/* Vext_data holds halo values of u for off-processor columns; it is only
allocated when num_procs > 1 (the offd loops are then empty otherwise --
NOTE(review): this assumes A_offd has no entries on one processor). */
HYPRE_Real *Vext_data;
HYPRE_Real *v_buf_data;
HYPRE_Real *tmp_data;
HYPRE_Int i, j;
HYPRE_Int ii, jj;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, num_threads, my_id;
HYPRE_Real zero = 0.0;
HYPRE_Real res, res2;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/* only allow jacobi and GS */
if (relax_type > 2)
relax_type = 2;
/*-----------------------------------------------------------------
* Exchange boundary values of u with neighboring processors.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/* pack the u entries that neighbors need */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Complete the halo exchange (Vext_data is now valid).
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
if (relax_type == 1) /* Jacobi */
{
/* snapshot u into Vtemp so the sweep reads only old values */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
/* u_i += w * (f - A u)_i / l1_norm_i */
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
else if (relax_type == 2) /* GS */
{
if (relax_weight == 1 && omega == 1)
{
/* unweighted hybrid symmetric GS: each thread does GS inside its
own row block and Jacobi (via tmp_data snapshot) across blocks */
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
/* static row partition [ns,ne) for thread j */
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* forward sweep over this thread's block */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
/* in-block column: use freshest value (GS) */
res -= A_diag_data[jj] * u_data[ii];
}
else
/* off-block column: use snapshot (Jacobi) */
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* backward sweep over this thread's block */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
/* weighted SSOR variant: c1 scales the residual, c2 scales the
correction already applied within this sweep (res2) */
HYPRE_Real c1 = omega*relax_weight;
HYPRE_Real c2 = omega*(1.0-relax_weight);
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
/* static row partition [ns,ne) for thread j */
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* forward sweep over this thread's block */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
/* save pre-sweep value; the backward sweep reads Vtemp below */
Vtemp_data[i] = u_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
/* already-updated in-block entries contribute to res2 */
if (ii < i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* backward sweep over this thread's block */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii > i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
} /* end of Jacobi or G.S. */
/* communication buffers exist only in the multi-processor case */
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
return(relax_error);
}
|
matabs.c | #include "matrix.h"
/** \brief Computes the element-wise absolute value of a matrix
 *
 * \param[in] A      Input matrix
 * \param[in] result Matrix to store the result; if NULL, a new matrix
 *                   of the same shape as A is allocated
 * \return \f$ \textrm{abs}(\mathbf{A}) \f$, or NULL if allocation fails
 *
 */
MATRIX mat_abs(MATRIX A, MATRIX result)
{
    int row, col;
    int ncols = MatCol(A);
    int nrows = MatRow(A);
    if (result == NULL)
    {
        result = mat_creat(nrows, ncols, UNDEFINED);
        if (result == NULL)
            return (NULL);
    }
    #pragma omp parallel for private(col)
    for (row = 0; row < nrows; ++row)
    {
        for (col = 0; col < ncols; ++col)
            result[row][col] = (mtype)fabs(A[row][col]);
    }
    return (result);
}
/** \brief Computes the element-wise absolute value of an integer vector
 *
 * \param[in] A      Input integer vector
 * \param[in] result Vector to store the result; if NULL, a new vector
 *                   of the same length as A is allocated
 * \return \f$ \textrm{abs}(A) \f$
 *
 */
INT_VECTOR int_vec_abs(INT_VECTOR A, INT_VECTOR result)
{
    int idx;
    int len = Int_VecLen(A);
    if (result == NULL)
    {
        result = int_vec_creat(len, UNDEFINED);
        if (result == NULL)
            int_vec_error(INT_VEC_MALLOC);
    }
    for (idx = 0; idx < len; ++idx)
    {
        result[idx] = abs(A[idx]);
    }
    return result;
}
|
right_synch_p2p_dataflow.c | /*
* This file is part of a small series of tutorial,
* which aims to demonstrate key features of the GASPI
* standard by means of small but expandable examples.
* Conceptually the tutorial follows a MPI course
* developed by EPCC and HLRS.
*
* Contact point for the MPI tutorial:
* rabenseifner@hlrs.de
* Contact point for the GASPI tutorial:
* daniel.gruenewald@itwm.fraunhofer.de
* mirko.rahn@itwm.fraunhofer.de
* christian.simmendinger@t-systems.com
*/
#include "assert.h"
#include "constant.h"
#include "data.h"
#include "topology.h"
#include "now.h"
#include "mm_pause.h"
#include "success_or_die.h"
#include "queue.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* global stage counters for comp */
static volatile counter_t *compStage = NULL;
#define MIN(x,y) ((x)<(y)?(x):(y))
int main (int argc, char *argv[])
{
int i, j;
int nProc, iProc;
int provided, required = MPI_THREAD_MULTIPLE;
MPI_Init_thread(&argc, &argv, required, &provided);
ASSERT(required == MPI_THREAD_MULTIPLE);
MPI_Comm_rank (MPI_COMM_WORLD, &iProc);
MPI_Comm_size (MPI_COMM_WORLD, &nProc);
gaspi_rank_t iProcG, nProcG;
SUCCESS_OR_DIE (gaspi_proc_init (GASPI_BLOCK));
SUCCESS_OR_DIE (gaspi_proc_rank (&iProcG));
SUCCESS_OR_DIE (gaspi_proc_num (&nProcG));
ASSERT(iProc == iProcG);
ASSERT(nProc == nProcG);
gaspi_number_t notification_num;
SUCCESS_OR_DIE (gaspi_notification_num (¬ification_num));
ASSERT(K_SZ*nThreads <= notification_num);
// num threads
omp_set_num_threads(nThreads);
// global stage counter
compStage = malloc(nThreads * sizeof(counter_t));
// left, right neighbour (proc)
const int left = LEFT(iProc);
const int right = RIGHT(iProc);
// assignment per proc, i-direction
#ifdef USE_STRONG_SCALING
int mSize = M_SZ/nProc;
if (M_SZ % nProc != 0)
{
mSize++;
}
const int mStart = iProc*mSize + 1;
const int mStop = MIN((iProc+1)*mSize, M_SZ);
mSize = mStop-mStart+1;
#else
int mSize = M_SZ;
const int mStart = iProc*mSize + 1;
const int mStop = MIN((iProc+1)*mSize, M_SZ*nProc);
mSize = mStop-mStart+1;
#endif
// align local array
const int CL_SZ = ((mSize+1) % CL) == 0 ? (mSize+1) : CL*(1+(mSize+1)/CL);
// allocate segment for array
gaspi_segment_id_t const segment_id = 0;
SUCCESS_OR_DIE ( gaspi_segment_create
( segment_id
, CL_SZ * (nThreads+1) * (K_SZ+1) * sizeof (double)
, GASPI_GROUP_ALL
, GASPI_BLOCK
, GASPI_MEM_UNINITIALIZED
));
gaspi_pointer_t array;
SUCCESS_OR_DIE ( gaspi_segment_ptr ( segment_id, &array) );
ASSERT (array != 0);
#pragma omp parallel default (none) shared(compStage, CL_SZ, \
mSize, array, stdout, stderr)
{
int const tid = omp_get_thread_num();
compStage[tid].global = 0;
// initialize data
data_init_tlocal(mSize, tid, array, CL_SZ);
}
data_init_global(mStart, mSize, iProc, array, CL_SZ);
int iter;
double median[NITER];
for (iter = 0; iter < NITER; iter++)
{
double time = -now();
MPI_Barrier(MPI_COMM_WORLD);
#pragma omp parallel default (none) shared(mStart, mSize, \
compStage, nThreads, iProc, nProc, stdout, stderr, array, CL_SZ)
{
int const tid = omp_get_thread_num();
gaspi_queue_id_t queue_id = 0;
int k;
for (k = 1; k <= K_SZ; k++)
{
if (left >= 0 )
{
gaspi_notification_id_t id, data_available = (k-1)*nThreads+tid;
SUCCESS_OR_DIE(gaspi_notify_waitsome (segment_id
, data_available
, 1
, &id
, GASPI_BLOCK
));
ASSERT (id == data_available);
gaspi_notification_t value;
SUCCESS_OR_DIE (gaspi_notify_reset (segment_id
, id
, &value
));
ASSERT (value == 1);
}
if(tid > 0)
{
volatile int it;
while((it = compStage[tid-1].global) <= compStage[tid].global)
{
_mm_pause();
}
}
// compute */
data_compute (mStart, mSize, tid, k, array, CL_SZ);
/* increase stage counter */
compStage[tid].global++;
// issue send
if (right < nProc)
{
gaspi_notification_id_t data_available = (k-1)*nThreads+tid;
wait_for_queue_entries_for_write_notify(&queue_id);
SUCCESS_OR_DIE ( gaspi_write_notify
( segment_id
, array_OFFSET (mSize, tid+1, k)
, right
, segment_id
, array_OFFSET (0, tid+1, k)
, sizeof (double)
, data_available
, 1
, queue_id
, GASPI_BLOCK
));
}
#ifdef USE_OMP_BARRIER
#pragma omp barrier
#endif
}
}
MPI_Barrier(MPI_COMM_WORLD);
time += now();
/* iteration time */
median[iter] = time;
}
MPI_Barrier(MPI_COMM_WORLD);
// validate */
#pragma omp parallel default (none) shared(mStart, array, CL_SZ, mSize)
{
int const tid = omp_get_thread_num();
data_validate (mStart, mSize, tid, K_SZ, array, CL_SZ);;
}
MPI_Barrier(MPI_COMM_WORLD);
sort_median(&median[0], &median[NITER-1]);
printf ("# gaspi %s nProc: %d nThreads: %d M_SZ: %d K_SZ: %d niter: %d time: %g\n"
, argv[0], nProc, nThreads, M_SZ, K_SZ, NITER, median[NITER/2]
);
if (iProc == nProc-1)
{
double res = 1.0E-06 * 4 * mSize*nThreads*K_SZ*nProc / median[NITER/2];
printf("\nRate (MFlops/s): %lf\n",res);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return EXIT_SUCCESS;
}
/* pngquant.c - quantize the colors in an alphamap down to a specified number
**
** Copyright (C) 1989, 1991 by Jef Poskanzer.
** Copyright (C) 1997, 2000, 2002 by Greg Roelofs; based on an idea by
** Stefan Schneider.
** © 2009-2013 by Kornel Lesinski.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
** implied warranty.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "viter.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char *const liq_attr_magic = "liq_attr", *const liq_image_magic = "liq_image",
*const liq_result_magic = "liq_result", *const liq_remapping_result_magic = "liq_remapping_result",
*const liq_freed_magic = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
/* Quantization settings handle. Created by liq_attr_create*(), released by
   liq_attr_destroy(). magic_header doubles as a run-time type tag. */
struct liq_attr {
    const char *magic_header;   // liq_attr_magic while valid, liq_freed_magic after destroy
    void* (*malloc)(size_t);    // allocator pair used for everything this handle owns
    void (*free)(void*);
    double target_mse, max_mse, voronoi_iteration_limit;
    float min_opaque_val;       // 1.0 disables the IE6 opacity workaround
    unsigned int max_colors, max_histogram_entries;
    unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
    unsigned int voronoi_iterations, feedback_loop_trials;
    bool last_index_transparent, use_contrast_maps, use_dither_map, fast_palette;
    unsigned int speed;         // 1..10 knob that liq_set_speed() expands into the fields above
    liq_log_callback_function *log_callback;
    void *log_callback_user_info;
    liq_log_flush_callback_function *log_flush_callback;
    void *log_flush_callback_user_info;
};
/* Input image handle: either caller-supplied rows/bitmap or a row callback,
   plus lazily built float-pixel caches and analysis maps. */
struct liq_image {
    const char *magic_header;       // liq_image_magic while valid
    void* (*malloc)(size_t);
    void (*free)(void*);
    f_pixel *f_pixels;              // full gamma-expanded copy, built lazily (may stay NULL in low-memory mode)
    rgba_pixel **rows;              // caller-provided row pointers (NULL when row_callback is used)
    double gamma;
    unsigned int width, height;
    unsigned char *noise, *edges, *dither_map;  // analysis maps, allocated on demand
    rgba_pixel *pixels, *temp_row;  // pixels: contiguous bitmap (if owned); temp_row: per-thread scratch rows
    f_pixel *temp_f_row;            // per-thread float scratch rows for low-memory mode
    liq_image_get_rgba_row_callback *row_callback;
    void *row_callback_user_info;
    float min_opaque_val;
    f_pixel fixed_colors[256];      // caller-mandated palette entries
    unsigned short fixed_colors_count;
    bool free_pixels, free_rows, free_rows_internal;  // ownership flags for caller-supplied buffers
    float gamma_lut[256];
};
/* Remapping-stage state, owned by a liq_result. Holds a duplicated palette so
   remapping can refine colors without disturbing the quantization result. */
typedef struct liq_remapping_result {
    const char *magic_header;   // liq_remapping_result_magic while valid
    void* (*malloc)(size_t);
    void (*free)(void*);
    unsigned char *pixels;
    colormap *palette;          // private copy (pam_duplicate_colormap)
    liq_palette int_palette;    // public 8-bit palette, built lazily
    double gamma, palette_error;
    float dither_level;
    bool use_dither_map;
    float gamma_lut[256];
} liq_remapping_result;
/* Quantization result handle returned by liq_quantize_image(). */
struct liq_result {
    const char *magic_header;   // liq_result_magic while valid
    void* (*malloc)(size_t);
    void (*free)(void*);
    liq_remapping_result *remapping;  // cached remapping state; invalidated when settings change
    colormap *palette;          // internal float palette
    liq_palette int_palette;    // public 8-bit palette, built lazily by liq_get_palette()
    float dither_level;
    double gamma, palette_error;  // palette_error < 0 means "not measured"
    int min_posterization_output;
    bool use_dither_map, fast_palette;
    float gamma_lut[256];
};
// Forward declarations for file-local helpers defined later in this file.
static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, const liq_image *img);
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels);
static void contrast_maps(liq_image *image);
static histogram *get_histogram(liq_image *input_image, const liq_attr *options);
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row);
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row);
static void liq_remapping_result_destroy(liq_remapping_result *result);
/* printf-style message delivery to the user's log callback (no-op when none
   is installed). Formats into a stack buffer sized by a dry-run vsnprintf. */
static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);
        // Bug fix: vsnprintf returns a negative value on encoding error; using
        // it as a VLA size would be undefined behavior.
        if (required_space <= 0) {
            return;
        }
        char buf[required_space];
        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);
        context->log_callback(context, buf, context->log_callback_user_info);
    }
}
inline static void verbose_print(const liq_attr *attr, const char *msg)
{
if (attr->log_callback) {
attr->log_callback(attr, msg, attr->log_callback_user_info);
}
}
/* Tells the user's flush callback (if any) that buffered log output should be emitted. */
static void liq_verbose_printf_flush(liq_attr *attr)
{
    liq_log_flush_callback_function *const flush = attr->log_flush_callback;
    if (flush != NULL) {
        flush(attr, attr->log_flush_callback_user_info);
    }
}
#if USE_SSE
/* Returns true when the CPU supports SSE instructions. */
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64))
    return true; // every x86-64 CPU has SSE
#else
    // 32-bit x86: query CPUID leaf 1 (cpuid is a project-provided macro)
    int a,b,c,d;
    cpuid(1, a, b, c, d);
    return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
    // Run-time type check for public handles: each handle stores the ADDRESS of
    // its magic string, so pointer identity (not strcmp) identifies the kind.
    if (!user_supplied_pointer) {
        return false;
    }
    // liq_freed_magic is written into the header by every *_destroy function
    if (user_supplied_pointer->magic_header == liq_freed_magic) {
        fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling, this is programmer error that should crash the program.
        // program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but security vulnerability may be worse.
        abort();
    }
    return user_supplied_pointer->magic_header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer)
{
    // Deliberately reads one byte through the user-supplied pointer so that an
    // invalid address faults here — with this function visible in the backtrace —
    // rather than deeper inside the library. The read value is irrelevant.
    if (pointer == NULL) {
        return false;
    }
    volatile char *const probe = pointer;
    char first_byte = *probe;
    (void)first_byte; // silence unused-read warnings
    return true;
}
/* Reports an error message through the attr's log callback (validated first). */
static void liq_log_error(const liq_attr *attr, const char *msg) {
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        liq_verbose_printf(attr, " error: %s", msg);
    }
}
/* Maps a 0..100 quality setting to a target mean-square-error threshold.
   0 accepts anything (MAX_DIFF); 100 demands a perfect match. */
static double quality_to_mse(long quality)
{
    switch (quality) {
        case 0:   return MAX_DIFF;
        case 100: return 0;
    }
    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double low_quality_bonus = MAX(0,0.016/(0.001+quality) - 0.001);
    const double curve = 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
    return low_quality_bonus + curve;
}
/* Inverse of quality_to_mse(): highest quality whose MSE target still admits
   `mse`. A small epsilon absorbs floating-point error. */
static unsigned int mse_to_quality(double mse)
{
    int quality = 100;
    while (quality > 0) {
        if (mse <= quality_to_mse(quality) + 0.000001) {
            return quality;
        }
        quality--;
    }
    return 0;
}
/* Translates a (minimum, target) 0..100 quality pair into MSE thresholds:
   target is what the quantizer aims for, minimum the worst acceptable result. */
LIQ_EXPORT liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool out_of_range = target < 0 || target > 100 || target < minimum || minimum < 0;
    if (out_of_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}
/* Returns the minimum acceptable quality (0..100), or -1 on a bad handle. */
LIQ_EXPORT int liq_get_min_quality(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return mse_to_quality(attr->max_mse);
    }
    return -1;
}
/* Returns the target quality (0..100), or -1 on a bad handle. */
LIQ_EXPORT int liq_get_max_quality(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return mse_to_quality(attr->target_mse);
    }
    return -1;
}
/* Sets the maximum palette size; indexed output supports 2..256 entries. */
LIQ_EXPORT liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = colors >= 2 && colors <= 256;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->max_colors = colors;
    return LIQ_OK;
}
/* Returns the configured maximum palette size, or -1 on a bad handle. */
LIQ_EXPORT int liq_get_max_colors(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->max_colors;
    }
    return -1;
}
/* Sets how many low bits of each output channel are truncated (0..4). */
LIQ_EXPORT liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = bits >= 0 && bits <= 4;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->min_posterization_output = bits;
    return LIQ_OK;
}
/* Returns the configured output posterization bits, or -1 on a bad handle. */
LIQ_EXPORT int liq_get_min_posterization(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->min_posterization_output;
    }
    return -1;
}
/* Expands a single 1..10 speed/quality knob into all internal tuning fields.
   Lower speed = more iterations, bigger histogram, extra analysis maps. */
LIQ_EXPORT liq_error liq_set_speed(liq_attr* attr, int speed)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = speed >= 1 && speed <= 10;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    const int base_iterations = MAX(8-speed,0);
    attr->voronoi_iterations = base_iterations + base_iterations * base_iterations/2;
    attr->voronoi_iteration_limit = 1.0/(double)(1<<(23-speed));
    attr->feedback_loop_trials = MAX(56-9*speed, 0);
    attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
    attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->fast_palette = (speed >= 7);
    // parallelized dither map might speed up floyd remapping
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5));
    attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
    attr->speed = speed;
    return LIQ_OK;
}
/* Returns the configured speed (1..10), or -1 on a bad handle. */
LIQ_EXPORT int liq_get_speed(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->speed;
    }
    return -1;
}
/* Sets the gamma used when producing the output palette. Any cached remapping
   was computed against the previous gamma, so it is discarded. */
LIQ_EXPORT liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    const bool in_range = gamma > 0 && gamma < 1.0;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    liq_remapping_result *const cached = res->remapping;
    if (cached) {
        liq_remapping_result_destroy(cached);
        res->remapping = NULL;
    }
    res->gamma = gamma;
    to_f_set_gamma(res->gamma_lut, res->gamma);
    return LIQ_OK;
}
/* Stores the IE6-workaround opacity threshold (0..255) as a 0..1 fraction. */
LIQ_EXPORT liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        if (min >= 0 && min <= 255) {
            attr->min_opaque_val = (double)min/255.0;
            return LIQ_OK;
        }
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    return LIQ_INVALID_POINTER;
}
/* Returns the IE6-workaround opacity threshold as 0..255, or -1 on a bad handle. */
LIQ_EXPORT int liq_get_min_opacity(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    // NOTE(review): the setter divides by 255 but this scales by 256 (clamped to
    // 255), so get(set(x)) can differ from x by 1 — presumably intentional
    // rounding; confirm against upstream before "fixing".
    return MIN(255, 256.0 * attr->min_opaque_val);
}
/* When enabled, the transparent color is placed at the last palette index
   (workaround for blu-ray subtitle decoders). Any nonzero flag means true. */
LIQ_EXPORT void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->last_index_transparent = (is_last != 0);
    }
}
/* Installs (or clears) the message logger. Pending output for the previous
   logger is flushed before the swap. */
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        liq_verbose_printf_flush(attr);
        attr->log_callback = callback;
        attr->log_callback_user_info = user_info;
    }
}
/* Installs (or clears) the log-flush notification callback. */
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->log_flush_callback = callback;
        attr->log_flush_callback_user_info = user_info;
    }
}
/* Creates an attr handle with the default (aligned) allocators. */
LIQ_EXPORT liq_attr* liq_attr_create()
{
    // NULL, NULL selects liq_aligned_malloc/liq_aligned_free
    return liq_attr_create_with_allocator(NULL, NULL);
}
/* Flushes pending log output, poisons the magic header so later use is caught
   by CHECK_STRUCT_TYPE, then releases the handle with its own free function. */
LIQ_EXPORT void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }
    liq_verbose_printf_flush(attr);
    void (*const release)(void*) = attr->free; // grab before poisoning
    attr->magic_header = liq_freed_magic;
    release(attr);
}
/* Duplicates an attr handle. A shallow copy suffices: liq_attr owns no heap
   members, only plain values and function pointers. */
LIQ_EXPORT liq_attr* liq_attr_copy(liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }
    liq_attr *copy = orig->malloc(sizeof(*copy));
    if (copy) {
        *copy = *orig;
    }
    return copy;
}
/* Default allocator: returns a 16-byte-aligned block. Over-allocates by 16 so
   the pointer can always be rounded up; the byte just before the returned
   address records the shift (XOR-obfuscated) for liq_aligned_free(). */
static void *liq_aligned_malloc(size_t size)
{
    unsigned char *base = malloc(size + 16);
    if (!base) {
        return NULL;
    }
    const uintptr_t shift = 16 - ((uintptr_t)base & 15); // >= 1, so [-1] is inside the block
    unsigned char *aligned = base + shift;
    assert(0 == (((uintptr_t)aligned) & 15));
    aligned[-1] = shift ^ 0x59; // obfuscated so stray pointers are caught by the assert in free
    return aligned;
}
/* Counterpart of liq_aligned_malloc(): recovers the stored shift and frees
   the original malloc'd block. */
static void liq_aligned_free(void *inptr)
{
    unsigned char *const aligned = inptr;
    const size_t shift = aligned[-1] ^ 0x59; // undo the obfuscation applied at allocation
    assert(shift > 0 && shift <= 16);
    free(aligned - shift);
}
/* Creates an attr handle using a caller-supplied allocator pair. Passing
   NULL, NULL selects the default aligned allocators; supplying only one of
   the two is rejected. Returns NULL on failure (or missing SSE when built
   with USE_SSE). */
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL;
    }
#endif
    const bool has_malloc = custom_malloc != NULL, has_free = custom_free != NULL;
    if (has_malloc != has_free) {
        return NULL; // either specify both or none
    }
    if (!has_malloc) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    }
    liq_attr *attr = custom_malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
        .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
        .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
        .target_mse = 0,
        .max_mse = MAX_DIFF,
    };
    liq_set_speed(attr, 5); // derive the remaining fields from the default speed
    return attr;
}
/* Appends a caller-mandated palette entry (stored gamma-expanded). At most
   256 fixed colors fit. */
LIQ_EXPORT liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) {
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (img->fixed_colors_count > 255) return LIQ_BUFFER_TOO_SMALL;
    const rgba_pixel px = {
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    };
    img->fixed_colors[img->fixed_colors_count] = to_f(img->gamma_lut, px);
    img->fixed_colors_count++;
    return LIQ_OK;
}
/* Switches the image to low-memory mode: one float scratch row per OpenMP
   thread instead of caching the whole converted image. */
static bool liq_image_use_low_memory(liq_image *img)
{
    const size_t bytes = sizeof(img->f_pixels[0]) * img->width * omp_get_max_threads();
    img->temp_f_row = img->malloc(bytes);
    return img->temp_f_row != NULL;
}
/* Decides whether caching the full float image would exceed the memory budget.
   The budget shrinks 8x when the hint says conversions won't be reused. */
static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
    const size_t budget_bytes = low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT;
    return img->width * img->height > budget_bytes / sizeof(f_pixel); // Watch out for integer overflow
}
/* Common constructor behind all liq_image_create_* variants. Exactly one of
   `rows` / `row_callback` must be provided. Returns NULL on invalid arguments
   or allocation failure. */
static liq_image *liq_image_create_internal(liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
    if (gamma < 0 || gamma > 1.0) {
        liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
        return NULL;
    }
    if (!rows && !row_callback) {
        liq_log_error(attr, "missing row data");
        return NULL;
    }
    liq_image *img = attr->malloc(sizeof(liq_image));
    if (!img) return NULL;
    *img = (liq_image){
        .magic_header = liq_image_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .width = width, .height = height,
        .gamma = gamma ? gamma : 0.45455, // 0 means "use default sRGB-ish gamma"
        .rows = rows,
        .row_callback = row_callback,
        .row_callback_user_info = row_callback_user_info,
        .min_opaque_val = attr->min_opaque_val,
    };
    to_f_set_gamma(img->gamma_lut, img->gamma);
    // scratch rows are needed when rows come from a callback or must be modified (IE6 workaround)
    if (!rows || attr->min_opaque_val < 1.f) {
        img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * width * omp_get_max_threads());
        if (!img->temp_row) {
            attr->free(img); // bug fix: img was leaked on this failure path
            return NULL;
        }
    }
    // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
    if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
        verbose_print(attr, " conserving memory");
        if (!liq_image_use_low_memory(img)) {
            // bug fix: img (and temp_row) were leaked on this failure path
            if (img->temp_row) {
                attr->free(img->temp_row);
            }
            attr->free(img);
            return NULL;
        }
    }
    if (img->min_opaque_val < 1.f) {
        verbose_print(attr, " Working around IE6 bug by making image less transparent...");
    }
    return img;
}
/* Transfers ownership of caller-supplied row/pixel buffers to the library so
   liq_image_destroy() frees them. Only valid for row-based images. */
LIQ_EXPORT liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    const int known_flags = LIQ_OWN_ROWS|LIQ_OWN_PIXELS;
    if (!img->rows || !ownership_flags || (ownership_flags & ~known_flags)) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    if (ownership_flags & LIQ_OWN_ROWS) {
        // rows built internally by liq_image_create_rgba are already owned
        if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
        img->free_rows = true;
    }
    if (ownership_flags & LIQ_OWN_PIXELS) {
        img->free_pixels = true;
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int row = 1; row < img->height; row++) {
                img->pixels = MIN(img->pixels, img->rows[row]);
            }
        }
    }
    return LIQ_OK;
}
/* Validates image dimensions: both positive and width*height not overflowing int. */
static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }
    const bool positive = width > 0 && height > 0;
    if (!positive) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }
    if (width > INT_MAX/height) { // overflow check without computing width*height
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}
/* Creates an image whose rows are produced on demand by a user callback
   instead of a pre-existing bitmap. */
LIQ_EXPORT liq_image *liq_image_create_custom(liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    return check_image_size(attr, width, height)
        ? liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma)
        : NULL;
}
/* Creates an image from caller-owned row pointers. Every row pointer (and the
   array slot holding it) is probed before the image is built. */
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(liq_attr *attr, void* rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    for(int row = 0; row < height; row++) {
        const bool readable = CHECK_USER_POINTER(rows+row) && CHECK_USER_POINTER(rows[row]);
        if (!readable) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
/* Creates an image from a single contiguous RGBA bitmap by synthesizing an
   internally-owned row-pointer array over it. Returns NULL on bad input or
   allocation failure. */
LIQ_EXPORT liq_image *liq_image_create_rgba(liq_attr *attr, void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }
    rgba_pixel *pixels = bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) return NULL;
    for(int i=0; i < height; i++) {
        rows[i] = pixels + width * i;
    }
    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        // bug fix: the old code dereferenced the NULL image below and leaked rows
        attr->free(rows);
        return NULL;
    }
    image->free_rows = true;
    image->free_rows_internal = true;
    return image;
}
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
/* Thin trampoline kept out-of-line (NEVER_INLINE) so crashes inside the
   user's row callback show this frame in backtraces. */
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
    assert(callback != NULL);
    assert(temp_row != NULL);
    callback(temp_row, row, width, user_info);
}
/* True when caller rows can be read directly: rows must exist and the IE6
   alpha workaround (which rewrites pixels) must be off. */
inline static bool liq_image_can_use_rows(liq_image *img)
{
    if (img->rows == NULL) {
        return false;
    }
    return !(img->min_opaque_val < 1.f);
}
/* Returns one 8-bit RGBA row: directly from caller rows when possible,
   otherwise staged into a per-thread scratch row (filled from rows or from the
   user callback) with the optional IE6 alpha adjustment applied. */
static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rows(img)) {
        return img->rows[row];
    }
    assert(img->temp_row);
    rgba_pixel *const dest = img->temp_row + img->width * omp_get_thread_num();
    if (!img->rows) {
        liq_executing_user_callback(img->row_callback, (liq_color*)dest, row, img->width, img->row_callback_user_info);
    } else {
        memcpy(dest, img->rows[row], img->width * sizeof(dest[0]));
    }
    if (img->min_opaque_val < 1.f) {
        modify_alpha(img, dest);
    }
    return dest;
}
/* Gamma-expands one row of 8-bit RGBA pixels into the float working format. */
static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); // SSE path requires 16-byte alignment
    const rgba_pixel *const src = liq_image_get_row_rgba(img, row);
    unsigned int x = 0;
    while (x < img->width) {
        row_f_pixels[x] = to_f(gamma_lut, src[x]);
        x++;
    }
}
/* Returns row `row` in float format. On first use, either converts the whole
   image into a cache (f_pixels) or, in low-memory mode, converts just the
   requested row into a per-thread scratch buffer. Returns NULL only when the
   low-memory fallback allocation fails. */
static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
    if (!img->f_pixels) {
        // low-memory mode: convert only this row, into this thread's scratch slot
        if (img->temp_f_row) {
            f_pixel *row_for_thread = img->temp_f_row + img->width * omp_get_thread_num();
            convert_row_to_f(img, row_for_thread, row, img->gamma_lut);
            return row_for_thread;
        }
        // building the full cache is only expected from thread 0 (not thread-safe)
        assert(omp_get_thread_num() == 0);
        if (!liq_image_should_use_low_memory(img, false)) {
            img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
        }
        if (!img->f_pixels) {
            // full cache refused or allocation failed: switch to per-row buffers and retry
            if (!liq_image_use_low_memory(img)) return NULL;
            return liq_image_get_row_f(img, row);
        }
        for(unsigned int i=0; i < img->height; i++) {
            convert_row_to_f(img, &img->f_pixels[i*img->width], i, img->gamma_lut);
        }
    }
    return img->f_pixels + img->width * row;
}
/* Returns the image width in pixels, or -1 on a bad handle. */
LIQ_EXPORT int liq_image_get_width(const liq_image *input_image)
{
    if (CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return input_image->width;
    }
    return -1;
}
/* Returns the image height in pixels, or -1 on a bad handle. */
LIQ_EXPORT int liq_image_get_height(const liq_image *input_image)
{
    if (CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return input_image->height;
    }
    return -1;
}
typedef void free_func(void*);
/* Picks the function used to free caller-supplied buffers.
   Fix: declared static — this is an internal helper used only by
   liq_image_free_rgba_source(); every other file-local helper here has
   internal linkage, and leaving it extern pollutes the library's symbols. */
static free_func *get_default_free_func(liq_image *img)
{
    // When default allocator is used then user-supplied pointers must be freed with free()
    if (img->free_rows_internal || img->free != liq_aligned_free) {
        return img->free;
    }
    return free;
}
/* Releases caller-provided bitmap and/or row arrays whose ownership was
   transferred via liq_image_set_memory_ownership(). */
static void liq_image_free_rgba_source(liq_image *input_image)
{
    free_func *const release = get_default_free_func(input_image);
    if (input_image->free_pixels && input_image->pixels) {
        release(input_image->pixels);
        input_image->pixels = NULL;
    }
    if (input_image->free_rows && input_image->rows) {
        release(input_image->rows);
        input_image->rows = NULL;
    }
}
/* Validates the handle, releases every buffer the image owns, then poisons
   the magic header so later use is caught by CHECK_STRUCT_TYPE. */
LIQ_EXPORT void liq_image_destroy(liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
    liq_image_free_rgba_source(input_image);
    void *const owned[] = {
        input_image->noise,
        input_image->edges,
        input_image->dither_map,
        input_image->f_pixels,
        input_image->temp_row,
        input_image->temp_f_row,
    };
    for (size_t i = 0; i < sizeof(owned)/sizeof(owned[0]); i++) {
        if (owned[i]) {
            input_image->free(owned[i]);
        }
    }
    input_image->magic_header = liq_freed_magic;
    input_image->free(input_image);
}
/* Builds a color histogram from the image and quantizes it down to a palette.
   The histogram is temporary; returns NULL on bad handles or failure. */
LIQ_EXPORT liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return NULL;
    if (!CHECK_STRUCT_TYPE(img, liq_image)) {
        liq_log_error(attr, "invalid image pointer");
        return NULL;
    }
    liq_result *result = NULL;
    histogram *hist = get_histogram(img, attr);
    if (hist) {
        result = pngquant_quantize(hist, attr, img);
        pam_freeacolorhist(hist);
    }
    return result;
}
/* Convenience wrapper: quantizes `img` and stores the result in *result.
   Returns LIQ_OK on success, LIQ_NOT_READY when quantization failed. */
LIQ_EXPORT liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result)
{
    *result = liq_quantize_image(attr, img);
    // Bug fix: must test *result (the produced liq_result). The old code tested
    // the out-parameter pointer itself, which is always non-NULL here, so
    // failures were reported as LIQ_OK.
    return *result ? LIQ_OK : LIQ_NOT_READY;
}
/* Sets the Floyd-Steinberg dithering strength (0..1). Invalidates any cached
   remapping, since it was produced with the previous level. */
LIQ_EXPORT liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    // Bug fix: validate the incoming argument. The old code range-checked the
    // previously stored res->dither_level, so any new value (e.g. 5.0) was
    // accepted and valid calls could be rejected based on stale state.
    // Validation also happens before the cache is destroyed, so invalid input
    // no longer discards a valid remapping.
    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->dither_level = dither_level;
    return LIQ_OK;
}
/* Creates the remapping-stage snapshot of a result: copies the settings and
   duplicates the palette so remapping can refine it independently. */
static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return NULL;
    }
    liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
    if (!res) return NULL;
    *res = (liq_remapping_result) {
        .magic_header = liq_remapping_result_magic,
        .malloc = result->malloc,
        .free = result->free,
        .gamma = result->gamma,
        .palette = pam_duplicate_colormap(result->palette), // private copy, freed in destroy
        .palette_error = result->palette_error,
        .dither_level = result->dither_level,
        .use_dither_map = result->use_dither_map,
    };
    to_f_set_gamma(res->gamma_lut, res->gamma);
    return res;
}
/* Returns the output gamma of the result, or -1 on a bad handle. */
LIQ_EXPORT double liq_get_output_gamma(const liq_result *result)
{
    if (CHECK_STRUCT_TYPE(result, liq_result)) {
        return result->gamma;
    }
    return -1;
}
/* Frees a remapping snapshot: its palette copy, its pixel buffer, and the
   struct itself. Poisons the magic header against use-after-free. */
static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
    if (result->palette) {
        pam_freecolormap(result->palette);
    }
    if (result->pixels) {
        result->free(result->pixels);
    }
    void (*const release)(void*) = result->free; // grab before poisoning
    result->magic_header = liq_freed_magic;
    release(result);
}
/* Destroys a quantization result: zeroes the public palette copies, tears
   down any remapping snapshot, frees the internal palette, then releases the
   handle with a poisoned magic header. */
LIQ_EXPORT void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
    memset(&res->int_palette, 0, sizeof(liq_palette));
    liq_remapping_result *const remap = res->remapping;
    if (remap) {
        memset(&remap->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(remap);
    }
    pam_freecolormap(res->palette);
    void (*const release)(void*) = res->free;
    res->magic_header = liq_freed_magic;
    release(res);
}
/* Returns the measured MSE scaled to the public range, preferring the
   quantization-stage error and falling back to the remapping-stage one.
   A negative value means "not measured" (or bad handle: -1). */
LIQ_EXPORT double liq_get_quantization_error(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    const double scale = 65536.0/6.0;
    if (result->palette_error >= 0) {
        return result->palette_error * scale;
    }
    const liq_remapping_result *const remap = result->remapping;
    if (remap && remap->palette_error >= 0) {
        return remap->palette_error * scale;
    }
    return result->palette_error;
}
/* Returns the achieved quality (0..100) derived from the measured MSE,
   preferring the quantization-stage error, then the remapping-stage one.
   A negative value means "not measured" (or bad handle: -1). */
LIQ_EXPORT int liq_get_quantization_quality(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    const liq_remapping_result *const remap = result->remapping;
    if (remap && remap->palette_error >= 0) {
        return mse_to_quality(remap->palette_error);
    }
    return result->palette_error;
}
/* qsort comparator: sorts palette entries by descending popularity.
   Fix: return 0 for equal keys. The old `v1 > v2 ? -1 : 1` claimed both
   a<b and b<a for ties, an inconsistent ordering that violates the qsort
   comparator contract (C11 7.22.5) and is unsafe with some implementations. */
static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return (v1 < v2) - (v1 > v2); // -1 when v1>v2, 0 on tie, +1 when v1<v2
}
/* Sorts a sub-range of the palette by descending popularity. */
static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    colormap_item *const first = map->palette + start;
    qsort(first, nelem, sizeof(*first), compare_popularity);
}
// Exchanges palette entries a and b by value (brace-block macro, not do/while,
// matching its existing call sites).
#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }
/* Orders the final palette for best PNG output: transparent entries first
   (shrinks the tRNS chunk), each group sorted by popularity (aids deflate),
   with an optional mode that instead pins one transparent color to the last
   index. */
static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        // special mode: find the first (near-)fully-transparent entry, move it
        // to the very last slot, sort the rest, and stop
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.0/256.0) {
                const unsigned int old = i, transparent_dest = map->colors-1;
                SWAP_PALETTE(map, transparent_dest, old);
                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }
    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent=0;
    for(unsigned int i=0; i < map->colors; i++) {
        if (map->palette[i].acolor.a < 255.0/256.0) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine the entry that was just swapped into slot i
            }
            num_transparent++;
        }
    }
    liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");
    /* colors sorted by popularity make pngs slightly more compressible
    * opaque and transparent are sorted separately
    */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, map->colors-num_transparent);
    if (map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}
/* Converts the float palette to the public 8-bit palette, applying output
   gamma and optional posterization, and writes the quantized values back into
   the float palette so later remapping sees the same rounding. */
static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, const float gamma_lut[256], unsigned int posterize)
{
    dest->count = map->colors;
    for(unsigned int i = 0; i < map->colors; i++) {
        rgba_pixel px = to_rgb(gamma, map->palette[i].acolor);
        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);
        /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
        map->palette[i].acolor = to_f(gamma_lut, px);
        if (px.a == 0) {
            // fully transparent entries get a recognizable "Liq" marker color
            px.r = 'L'; px.g = 'i'; px.b = 'q';
        }
        dest->entries[i] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}
/* Returns the public 8-bit palette: the remapping-stage palette when one has
   been built, otherwise the (lazily constructed) quantization-stage palette. */
LIQ_EXPORT const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
    liq_remapping_result *const remap = result->remapping;
    if (remap && remap->int_palette.count) {
        return &remap->int_palette;
    }
    if (result->int_palette.count == 0) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->gamma_lut, result->min_posterization_output);
    }
    return &result->int_palette;
}
/* Remaps every pixel to its nearest palette entry, writing indices into
   output_pixels and accumulating per-entry color averages for a subsequent
   voronoi (K-means) refinement. Returns the mean remapping error per pixel,
   or -1 when row conversion fails. */
static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map, const bool fast)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    const float min_opaque_val = input_image->min_opaque_val;
    double remapping_error=0;
    if (!liq_image_get_row_f(input_image, 0)) { // trigger lazy conversion
        return -1;
    }
    struct nearest_map *const n = nearest_init(map, fast);
    // per-thread accumulators; VITER_CACHE_LINE_GAP padding separates threads' slots
    const unsigned int max_threads = omp_get_max_threads();
    viter_state average_color[(VITER_CACHE_LINE_GAP+map->colors) * max_threads];
    viter_init(map, max_threads, average_color);
    // NOTE(review): the GCC>=9 guard around the pragma looks like a workaround
    // for older compilers rejecting this default(none) clause — confirm before changing
    #if __GNUC__ >= 9
    #pragma omp parallel for if (rows*cols > 3000) \
    schedule(static) default(none) shared(input_image,output_pixels,map,min_opaque_val,rows,cols,n,average_color) reduction(+:remapping_error)
    #endif
    for(int row = 0; row < rows; ++row) {
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        unsigned int last_match=0; // search hint: neighboring pixels often map to the same entry
        for(unsigned int col = 0; col < cols; ++col) {
            f_pixel px = row_pixels[col];
            float diff;
            output_pixels[row][col] = last_match = nearest_search(n, px, last_match, min_opaque_val, &diff);
            remapping_error += diff;
            viter_update_color(px, 1.0, map, last_match, omp_get_thread_num(), average_color);
        }
    }
    viter_finalize(map, max_threads, average_color);
    nearest_free(n);
    return remapping_error / (input_image->width * input_image->height);
}
/* Applies accumulated Floyd-Steinberg error (thiserr) to a pixel, scaling the
   correction down when it would overshoot the displayable range or when the
   error is implausibly large. Returns px unchanged when the error is too small
   to matter. */
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color. */
    const float sr = thiserr.r * dither_level,
        sg = thiserr.g * dither_level,
        sb = thiserr.b * dither_level,
        sa = thiserr.a * dither_level;
    float ratio = 1.0;
    // allowing some overflow prevents undithered bands caused by clamping of all channels
    if (px.r + sr > 1.03) ratio = MIN(ratio, (1.03-px.r)/sr);
    else if (px.r + sr < 0) ratio = MIN(ratio, px.r/-sr);
    if (px.g + sg > 1.03) ratio = MIN(ratio, (1.03-px.g)/sg);
    else if (px.g + sg < 0) ratio = MIN(ratio, px.g/-sg);
    if (px.b + sb > 1.03) ratio = MIN(ratio, (1.03-px.b)/sb);
    else if (px.b + sb < 0) ratio = MIN(ratio, px.b/-sb);
    // alpha is clamped hard — no overshoot allowed
    float a = px.a + sa;
    if (a > 1.0) { a = 1.0; }
    else if (a < 0) { a = 0; }
    // If dithering error is crazy high, don't propagate it that much
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
    const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
    if (dither_error > max_dither_error) {
        ratio *= 0.8;
    } else if (dither_error < 2.f/256.f/256.f) {
        // don't dither areas that don't have noticeable error — makes file smaller
        return px;
    }
    return (f_pixel){
        .r=px.r + sr * ratio,
        .g=px.g + sg * ratio,
        .b=px.b + sb * ratio,
        .a=a,
    };
}
/**
 Remaps the image to the palette using Floyd-Steinberg error diffusion.

 Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.

 If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
 */
static void remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], const colormap *map, const float max_dither_error, const bool use_dither_map, const bool output_image_is_remapped, float base_dithering_level)
{
    const unsigned int rows = input_image->height, cols = input_image->width;
    // prefer the refined dither map when available; fall back to the raw edge map
    const unsigned char *dither_map = use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;
    const float min_opaque_val = input_image->min_opaque_val;
    const colormap_item *acolormap = map->palette;

    struct nearest_map *const n = nearest_init(map, false);

    /* Initialize Floyd-Steinberg error vectors. */
    f_pixel *restrict thiserr, *restrict nexterr;
    thiserr = input_image->malloc((cols + 2) * sizeof(*thiserr) * 2); // +2 saves from checking out of bounds access
    if (!thiserr) {
        // check BEFORE computing nexterr: pointer arithmetic on NULL is undefined behavior.
        // Also release the nearest-neighbor structure, which the old early return leaked.
        nearest_free(n);
        return;
    }
    nexterr = thiserr + (cols + 2);
    srand(12345); /* deterministic dithering is better for comparing results */

    // seed the error buffer with tiny noise (< 1/255) to break up perfectly flat areas
    for (unsigned int col = 0; col < cols + 2; ++col) {
        const double rand_max = RAND_MAX;
        thiserr[col].r = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].g = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].b = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].a = ((double)rand() - rand_max/2.0)/rand_max/255.0;
    }

    // response to this value is non-linear and without it any value < 0.8 would give almost no dithering
    base_dithering_level = 1.0 - (1.0-base_dithering_level)*(1.0-base_dithering_level)*(1.0-base_dithering_level);

    if (dither_map) {
        base_dithering_level *= 1.0/255.0; // convert byte to float
    }
    base_dithering_level *= 15.0/16.0; // prevent small errors from accumulating

    bool fs_direction = true;
    unsigned int last_match=0;
    for (unsigned int row = 0; row < rows; ++row) {
        memset(nexterr, 0, (cols + 2) * sizeof(*nexterr));
        unsigned int col = (fs_direction) ? 0 : (cols - 1);
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        do {
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col];
            }
            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);

            // when the image was already remapped, its previous result is a better starting guess than the neighbor's match
            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            output_pixels[row][col] = last_match = nearest_search(n, spx, guessed_match, min_opaque_val, NULL);
            const f_pixel xp = acolormap[last_match].acolor;
            f_pixel err = {
                .r = (spx.r - xp.r),
                .g = (spx.g - xp.g),
                .b = (spx.b - xp.b),
                .a = (spx.a - xp.a),
            };

            // If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                dither_level *= 0.75;
            }

            // more opaque colors carry more visual weight, so their error matters more
            const float colorimp = (3.0f + acolormap[last_match].acolor.a)/4.0f * dither_level;
            err.r *= colorimp;
            err.g *= colorimp;
            err.b *= colorimp;
            err.a *= dither_level;

            /* Propagate Floyd-Steinberg error terms: 7/16 ahead on this row; 1/16, 5/16, 3/16 on the next. */
            if (fs_direction) {
                thiserr[col + 2].a += err.a * (7.f/16.f);
                thiserr[col + 2].r += err.r * (7.f/16.f);
                thiserr[col + 2].g += err.g * (7.f/16.f);
                thiserr[col + 2].b += err.b * (7.f/16.f);
                nexterr[col + 2].a  = err.a * (1.f/16.f);
                nexterr[col + 2].r  = err.r * (1.f/16.f);
                nexterr[col + 2].g  = err.g * (1.f/16.f);
                nexterr[col + 2].b  = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col    ].a += err.a * (3.f/16.f);
                nexterr[col    ].r += err.r * (3.f/16.f);
                nexterr[col    ].g += err.g * (3.f/16.f);
                nexterr[col    ].b += err.b * (3.f/16.f);
            } else {
                thiserr[col    ].a += err.a * (7.f/16.f);
                thiserr[col    ].r += err.r * (7.f/16.f);
                thiserr[col    ].g += err.g * (7.f/16.f);
                thiserr[col    ].b += err.b * (7.f/16.f);
                nexterr[col    ].a  = err.a * (1.f/16.f);
                nexterr[col    ].r  = err.r * (1.f/16.f);
                nexterr[col    ].g  = err.g * (1.f/16.f);
                nexterr[col    ].b  = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col + 2].a += err.a * (3.f/16.f);
                nexterr[col + 2].r += err.r * (3.f/16.f);
                nexterr[col + 2].g += err.g * (3.f/16.f);
                nexterr[col + 2].b += err.b * (3.f/16.f);
            }

            // remapping is done in zig-zag
            if (fs_direction) {
                ++col;
                if (col >= cols) break;
            } else {
                if (col <= 0) break;
                --col;
            }
        } while(1);

        f_pixel *const temperr = thiserr;
        thiserr = nexterr;
        nexterr = temperr;
        fs_direction = !fs_direction;
    }

    input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
    nearest_free(n);
}
/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */
static void remove_fixed_colors_from_histogram(histogram *hist, const liq_image *input_image, const float target_mse) {
    // colors closer than this to a fixed color are considered duplicates
    const float max_difference = MAX(target_mse/2.0, 2.0/256.0/256.0);
    if (!input_image->fixed_colors_count) {
        return;
    }
    int j = 0;
    while (j < hist->size) {
        bool is_duplicate = false;
        for (unsigned int i = 0; i < input_image->fixed_colors_count; i++) {
            if (colordifference(hist->achv[j].acolor, input_image->fixed_colors[i]) < max_difference) {
                is_duplicate = true;
                break;
            }
        }
        if (is_duplicate) {
            // remove color from histogram by overwriting with the last entry;
            // the swapped-in entry is examined next, so don't advance j
            hist->achv[j] = hist->achv[--hist->size];
        } else {
            j++;
        }
    }
}
/* histogram contains information how many times each color is present in the image, weighted by importance_map */
static histogram *get_histogram(liq_image *input_image, const liq_attr *options)
{
    // posterization drops low-order bits; honor the stronger of the two requested levels
    unsigned int ignorebits=MAX(options->min_posterization_output, options->min_posterization_input);
    const unsigned int cols = input_image->width, rows = input_image->height;

    if (!input_image->noise && options->use_contrast_maps) {
        contrast_maps(input_image);
    }

    /*
    ** Step 2: attempt to make a histogram of the colors, unclustered.
    ** If at first we don't succeed, increase ignorebits to increase color
    ** coherence and try again.
    */

    unsigned int maxcolors = options->max_histogram_entries;
    struct acolorhash_table *acht;
    const bool all_rows_at_once = liq_image_can_use_rows(input_image);
    // retry loop: each pass rebuilds the hash from scratch with coarser colors
    do {
        acht = pam_allocacolorhash(maxcolors, rows*cols, ignorebits, options->malloc, options->free);
        if (!acht) return NULL;

        // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
        // noise map does not include edges to avoid ruining anti-aliasing
        for(unsigned int row=0; row < rows; row++) {
            bool added_ok;
            if (all_rows_at_once) {
                // all rows hashed in a single call; on success break out of the row loop
                added_ok = pam_computeacolorhash(acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->noise);
                if (added_ok) break;
            } else {
                const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
                added_ok = pam_computeacolorhash(acht, rows_p, cols, 1, input_image->noise ? &input_image->noise[row * cols] : NULL);
            }
            if (!added_ok) {
                // hash table overflowed; coarsen the colors and restart (acht = NULL keeps the outer loop going)
                ignorebits++;
                liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", ignorebits);
                pam_freeacolorhash(acht);
                acht = NULL;
                break;
            }
        }
    } while(!acht);

    // noise map was only needed for histogram importance weighting
    if (input_image->noise) {
        input_image->free(input_image->noise);
        input_image->noise = NULL;
    }

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    histogram *hist = pam_acolorhashtoacolorhist(acht, input_image->gamma, input_image->gamma_lut, options->malloc, options->free);
    pam_freeacolorhash(acht);
    if (hist) {
        liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
        remove_fixed_colors_from_histogram(hist, input_image, options->target_mse);
    }
    return hist;
}
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */
    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
        if (px.a < almost_opaque_val_int) {
            continue; // transparent enough to be left alone
        }
        float raised_alpha = px.a / 255.f;
        raised_alpha = almost_opaque_val + (raised_alpha-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
        raised_alpha *= 256.f;
        row_pixels[col].a = raised_alpha >= 255.f ? 255 : raised_alpha;
    }
}
/**
 Builds two maps:
    noise - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
    edges - noise map including all edges
 */
static void contrast_maps(liq_image *image)
{
    const int cols = image->width, rows = image->height;
    // too small to analyze, or the three byte-maps would exceed the memory budget
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }

    unsigned char *restrict noise = image->malloc(cols*rows);
    unsigned char *restrict edges = image->malloc(cols*rows);
    unsigned char *restrict tmp = image->malloc(cols*rows);
    if (!noise || !edges || !tmp) {
        // don't leak the buffers that did get allocated
        // (the custom free callback isn't guaranteed to accept NULL, so guard each)
        if (noise) image->free(noise);
        if (edges) image->free(edges);
        if (tmp) image->free(tmp);
        return;
    }

    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);

    for (int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); // clamped at the bottom edge

        f_pixel prev, curr = curr_row[0], next=curr;
        for (int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)]; // clamped at the right edge

            // contrast is difference between pixels neighbouring horizontally and vertically
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);

            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];

            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);

            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            // high contrast in only one direction is an edge, in both directions it's noise
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f - MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;

            z *= 256.f;
            noise[j*cols+i] = z < 256 ? z : 255;
            z = (1.f-edge)*256.f;
            edges[j*cols+i] = z < 256 ? z : 255;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);

    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    // edges map also excludes noisy areas
    for(int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    // ownership of the two maps passes to the image
    image->noise = noise;
    image->edges = edges;
}
/**
* Builds map of neighbor pixels mapped to the same palette entry
*
* For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
* and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
* Correct flood fill doesn't have visually good properties.
*/
static void update_dither_map(unsigned char *const *const row_pointers, liq_image *input_image)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            // end of a horizontal run of identical palette indices (or end of the row)
            if (px != lastpixel || col == width-1) {
                // 2.5 baseline ensures short runs still receive some dithering
                float neighbor_count = 2.5f + col-lastcol;

                // count pixels directly above/below the run that map to the same palette entry
                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 1.f;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 1.f;
                    }
                    i++;
                }

                // lower the edge value (i.e. permit more dithering) where one color
                // covers a larger neighborhood; converts the edges buffer in place
                while(lastcol <= col) {
                    float e = edges[row*width + lastcol] / 255.f;
                    e *= 1.f - 2.5f/neighbor_count;
                    edges[row*width + lastcol++] = e * 255.f;
                }
                lastpixel = px;
            }
        }
    }
    // the edges buffer has been rewritten in place into the dither map; transfer ownership
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}
/* Builds a new palette containing up to max_colors entries: as many generated
   colors as fit, followed by the user-supplied fixed colors (always included).
   Takes ownership of (and frees) the input palette. Returns NULL on allocation
   failure. */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*)) {
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
    if (!newpal) {
        // previously this was dereferenced unconditionally — NULL deref on OOM.
        // Free the old palette so the caller's single-pointer ownership stays consistent.
        if (palette) pam_freecolormap(palette);
        return NULL;
    }

    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        // copy as many generated colors as the fixed colors leave room for
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    // append the fixed colors, flagged so later stages won't modify them
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}
static void adjust_histogram_callback(hist_item *item, float diff)
{
    /* Boost the weight of colors that remapped poorly (large diff), so the next
       mediancut pass pays more attention to them. */
    const float combined_weight = item->perceptual_weight + item->adjusted_weight;
    item->adjusted_weight = combined_weight * sqrtf(1.f + diff);
}
/**
Repeats mediancut with different histogram weights to find palette with minimum error.
feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));

    int feedback_loop_trials = options->feedback_loop_trials;
    colormap *acolormap = NULL; // best palette found so far
    double least_error = MAX_DIFF; // its total error (MSE)
    // overshoot lets mediancut aim slightly worse than target, since Voronoi iteration improves on it
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
    const double percent = (double)(feedback_loop_trials>0?feedback_loop_trials:1)/100.0; // for progress reporting only

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, options->min_opaque_val, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(90.0/65536.0, target_mse), least_error)*1.2,
                options->malloc, options->free);
        } else {
            // every slot is taken by fixed colors (or histogram is empty); nothing to search for
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }
        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time Voronoi iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = viter_do_iteration(hist, newmap, options->min_opaque_val, first_run_of_target_mse ? NULL : adjust_histogram_callback, !acolormap || options->fast_palette);

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap; // newmap becomes the new best candidate

            if (total_error < target_mse && total_error > 0) {
                // voronoi iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // candidate rejected: pull adjusted weights back towards the original perceptual weights
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        liq_verbose_printf(options, " selecting colors...%d%%",100-MAX(0,(int)(feedback_loop_trials/percent)));
    }
    while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}
/* Turns the histogram into a liq_result holding the final palette.
   Returns NULL on allocation failure or when the quality (MSE) limit is exceeded. */
static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, const liq_image *img)
{
    colormap *acolormap;
    double palette_error = -1;

    // no point having perfect match with imperfect colors (ignorebits > 0)
    const bool fast_palette = options->fast_palette || hist->ignorebits > 0;
    const bool few_input_colors = hist->size+img->fixed_colors_count <= options->max_colors;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = pam_colormap(hist->size, options->malloc, options->free);
        if (!acolormap) {
            return NULL; // was dereferenced unchecked before
        }
        for(unsigned int i=0; i < hist->size; i++) {
            acolormap->palette[i].acolor = hist->achv[i].acolor;
            acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
        }
        acolormap = add_fixed_colors_to_palette(acolormap, options->max_colors, img->fixed_colors, img->fixed_colors_count, options->malloc, options->free);
        if (!acolormap) {
            return NULL;
        }
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, img->fixed_colors, img->fixed_colors_count, &palette_error);
        if (!acolormap) {
            return NULL;
        }

        // Voronoi iteration approaches local minimum for the palette
        const double iteration_limit = options->voronoi_iteration_limit;
        unsigned int iterations = options->voronoi_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in viter_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            verbose_print(options, " moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = viter_do_iteration(hist, acolormap, options->min_opaque_val, NULL, i==0 || options->fast_palette);

                // converged: improvement too small to continue
                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // burn an extra iteration to bail out sooner
                }

                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
                               palette_error*65536.0/6.0, mse_to_quality(palette_error),
                               max_mse*65536.0/6.0, mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return NULL;
        }
    }

    sort_palette(acolormap, options);

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) {
        pam_freecolormap(acolormap); // don't leak the palette on OOM (previously leaked)
        return NULL;
    }
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .fast_palette = fast_palette,
        .use_dither_map = options->use_dither_map,
        .gamma = img->gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    to_f_set_gamma(result->gamma_lut, result->gamma);
    return result;
}
/* Remaps input_image into the caller-provided contiguous buffer (one byte per
   pixel, row-major). Convenience wrapper over liq_write_remapped_image_rows. */
LIQ_EXPORT liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    // one byte per pixel
    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    // build a row-pointer view into the caller's buffer
    unsigned char **rows = malloc(input_image->height * sizeof(unsigned char *));
    if (!rows) {
        return LIQ_OUT_OF_MEMORY; // previously dereferenced unchecked
    }
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    liq_error err = liq_write_remapped_image_rows(result, input_image, rows);
    free(rows);
    return err;
}
LIQ_EXPORT liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    // validate the user-supplied handles and every individual row pointer
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // each call gets a fresh remapping result; drop state from any previous call
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }
    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    // build edge/noise maps now if dithering will need them and they don't exist yet
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    /*
    ** Step 4: map the colors in the image to their closest match in the
    ** new colormap, and write 'em out.
    */

    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        // no dithering: round the palette first, then do a plain nearest-color remap
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->gamma_lut, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
    } else {
        const bool generate_dither_map = result->use_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
            update_dither_map(row_pointers, input_image);
        }

        // remapping above was the last chance to do voronoi iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->gamma_lut, quant->min_posterization_output);

        remap_to_palette_floyd(input_image, row_pointers, result->palette,
            MAX(remapping_error*2.4, 16.f/256.f), result->use_dither_map, generate_dither_map, result->dither_level);
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}
LIQ_EXPORT int liq_version() {
    /* Library version encoded as a single integer (LIQ_VERSION from the public header). */
    const int current_version = LIQ_VERSION;
    return current_version;
}
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, where as
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it can easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = GxB_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mxv(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mxv(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(i)
x = x + AT (:,j) * q
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended on being a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likewise, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_internal.h"
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if (v_output == NULL || (A == NULL && AT == NULL))
{
// required output argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_sc = GrB_DESC_SC ;
GrB_Descriptor desc_rc = GrB_DESC_RC ;
GrB_Descriptor desc_r = GrB_DESC_R ;
#else
GrB_Descriptor desc_s = NULL ;
GrB_Descriptor desc_sc = LAGraph_desc_ooco ;
GrB_Descriptor desc_rc = LAGraph_desc_oocr ;
GrB_Descriptor desc_r = LAGraph_desc_ooor ;
#endif
bool use_vxm_with_A ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
if (A == NULL)
{
// only AT is provided
LAGr_Matrix_ncols (&nrows, AT) ;
LAGr_Matrix_nrows (&ncols, AT) ;
LAGr_Matrix_nvals (&nvalA, AT) ;
use_vxm_with_A = false ;
}
else
{
// A is provided. AT may or may not be provided
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nvalA, A) ;
use_vxm_with_A = true ;
}
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// check the format of A and AT
//--------------------------------------------------------------------------
bool csr = true ;
// csr is true if A and AT are known (or assumed) to be in CSR format; if
// false, they are known to be in CSC format.
// This can be tested in SuiteSparse:GraphBLAS. Other libraries can use
// this section for their own library-specific tests, if they have them.
// LAGraph_bfs_pushpull will work just fine if nothing is changed or if the
// following is disabled (even SuiteSparse:GraphBLAS). The push/pull
// behaviour will be unpredicatble, however, unless the library default
// format is CSR.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
// The CSR vs CSC status can be tested in SuiteSparse:GraphBLAS.
// However, even with SuiteSparse:GraphBLAS, this step is optional.
GxB_Format_Value A_format = -1, AT_format = -1 ;
bool A_csr = true, AT_csr = true ;
if (A != NULL)
{
// A_csr is true if accessing A(i,:) is fast
LAGr_get (A , GxB_FORMAT, &A_format) ;
A_csr = (A_format == GxB_BY_ROW) ;
}
if (AT != NULL)
{
// AT_csr is true if accessing AT(i,:) is fast
LAGr_get (AT, GxB_FORMAT, &AT_format) ;
AT_csr = (AT_format == GxB_BY_ROW) ;
}
// Assume CSR if A(i,:) and AT(i,:) are both fast. If csr is false,
// then the algorithm below will reverse the use of vxm and mxv.
csr = A_csr && AT_csr ;
if (push_pull)
{
// both A and AT are provided. Require they have the same format.
// Either both A(i,:) and AT(i,:) are efficient to accesss, or both
// A(:,j) and AT(:,j) are efficient to access.
if (A_csr != AT_csr)
{
LAGRAPH_ERROR ("A and AT must in the same format:\n"
"both GxB_BY_ROW, or both GxB_BY_COL",
GrB_INVALID_VALUE) ;
}
}
else
{
// only A or AT are provided. Refuse to do the pull-only version.
if (A != NULL && A_format == GxB_BY_COL)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: AT not provided, so A must be GxB_BY_ROW\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
if (AT != NULL && AT_format == GxB_BY_ROW)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: A not provided, so AT must be GxB_BY_COL\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
}
#endif
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN (n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
LAGr_Vector_new (&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX (256, sqrt ((double) n)) ;
if (!vsparse)
{
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
LAGr_assign (v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
GrB_Semiring first_semiring, second_semiring ;
if (compute_tree)
{
// create an integer vector q, and set q(source) to source+1
LAGr_Vector_new (&q, int_type, n) ;
LAGr_Vector_setElement (q, source+1, source) ;
if (n > INT32_MAX)
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT64 ;
second_semiring = LAGraph_MIN_SECOND_INT64 ;
#endif
}
else
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT32 ;
second_semiring = LAGraph_MIN_SECOND_INT32 ;
#endif
}
// create the empty parent vector
LAGr_Vector_new (&pi, int_type, n) ;
if (!vsparse)
{
// make pi a dense vector of all zeros
LAGr_assign (pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
LAGr_Vector_setElement (pi, source+1, source) ;
}
else
{
// create a boolean vector q, and set q(source) to true
LAGr_Vector_new (&q, GrB_BOOL, n) ;
LAGr_Vector_setElement (q, true, source) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
#else
// can terminate early, but requires more data movement internally
first_semiring = LAGraph_LOR_FIRST_BOOL ;
second_semiring = LAGraph_LOR_SECOND_BOOL ;
#endif
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for (int64_t level = 1 ; ; level++)
{
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
LAGr_assign (v, q, NULL, level, GrB_ALL, n, desc_s) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if (nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if (vsparse && nvisited > vlimit)
{
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
LAGr_assign (v, v, NULL, 0, GrB_ALL, n, desc_sc) ;
LAGr_Vector_nvals (&ignore, v) ;
if (compute_tree)
{
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
LAGr_assign (pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ;
LAGr_Vector_nvals (&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if (push_pull)
{
double pushwork = d * nq ;
double expected = (double) n / (double) (nvisited+1) ;
double per_dot = LAGRAPH_MIN (d, expected) ;
double binarysearch = (3 * (1 + log2 ((double) nq))) ;
double pullwork = (n-nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
if (!csr)
{
// Neither A(i,:) nor AT(i,:) is efficient. Instead, both
// A(:,j) and AT(:,j) is fast (that is, the two matrices
// are in CSC format). Swap the
use_vxm_with_A = !use_vxm_with_A ;
}
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if (use_vxm_with_A)
{
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
LAGr_vxm (q, v, NULL, first_semiring, q, A, desc_rc) ;
}
else
{
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
LAGr_mxv (q, v, NULL, second_semiring, AT, q, desc_rc) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if (compute_tree)
{
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
LAGr_assign (pi, q, NULL, q, GrB_ALL, n, desc_s) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
GrB_Index *qi ;
if (n > INT32_MAX)
{
int64_t *qx ;
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (int64_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
}
else
{
int32_t *qx ;
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (int32_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
}
#else
// TODO: use extractTuples and build instead
// Or use something like:
// extract tuples into I
// let e = 1:n be created once, in initialization phase
// q<q> = e (I)
fprintf (stderr, "TODO: use extractTuples here\n") ;
abort ( ) ;
#endif
}
else
{
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
LAGr_Vector_nvals (&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if (compute_tree)
{
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
soxr.c | /* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "soxr.h"
#include "data-io.h"
#include "internal.h"
#if AVUTIL_FOUND
#include <libavutil/cpu.h>
#endif
#if WITH_DEV_TRACE
#include <stdarg.h>
#include <stdio.h>
int _soxr_trace_level;
void _soxr_trace(char const * fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
va_end(args);
}
#endif
char const * soxr_version(void)
{
return "libsoxr-" SOXR_THIS_VERSION_STR;
}
typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])
typedef void * resampler_t; /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */
typedef void (* deinterleave_t)(sample_t * * dest,
soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);
typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
sample_t const * const * src, size_t, unsigned, unsigned long *);
struct soxr {
unsigned num_channels;
double io_ratio;
soxr_error_t error;
soxr_quality_spec_t q_spec;
soxr_io_spec_t io_spec;
soxr_runtime_spec_t runtime_spec;
void * input_fn_state;
soxr_input_fn_t input_fn;
size_t max_ilen;
resampler_shared_t shared;
resampler_t * resamplers;
control_block_t control_block;
deinterleave_t deinterleave;
interleave_t interleave;
void * * channel_ptrs;
size_t clips;
unsigned long seed;
int flushing;
};
#if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S
#include "filter.h"
#else
#define lsx_to_3dB(x) ((x)/(x))
#endif
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
soxr_quality_spec_t spec, * p = &spec;
unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */
unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q;
double rej;
memset(p, 0, sizeof(*p));
if (quality > SOXR_PRECISIONQ) {
p->e = "invalid quality type";
return spec;
}
flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0;
p->phase_response = "\62\31\144"[(recipe & 0x30)>>4];
p->stopband_begin = 1;
p->precision =
quality == SOXR_QQ ? 0 :
quality <= SOXR_16_BITQ ? 16 :
quality <= SOXR_32_BITQ ? 4 + quality * 4 :
quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */
0;
rej = p->precision * linear_to_dB(2.);
p->flags = flags;
if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) {
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */
p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej);
if (quality <= 2)
p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
}
else { /* TODO: move to soxr-lsr.c */
static float const bw[] = {.931f, .832f, .663f};
p->passband_end = bw[quality - SOXR_LSR0Q];
if (quality == SOXR_LSR2Q) {
p->flags &= ~SOXR_ROLLOFF_NONE;
p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ;
}
}
if (recipe & SOXR_STEEP_FILTER)
p->passband_end = 1 - .01 / lsx_to_3dB(rej);
return spec;
}
char const * soxr_engine(soxr_t p)
{
return resampler_id();
}
size_t * soxr_num_clips(soxr_t p)
{
return &p->clips;
}
soxr_error_t soxr_error(soxr_t p)
{
return p->error;
}
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
soxr_runtime_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
p->log2_min_dft_size = 10;
p->log2_large_dft_size = 17;
p->coef_size_kbytes = 400;
p->num_threads = num_threads;
return spec;
}
soxr_io_spec_t soxr_io_spec(
soxr_datatype_t itype,
soxr_datatype_t otype)
{
soxr_io_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
if ((itype | otype) >= SOXR_SPLIT * 2)
p->e = "invalid io datatype(s)";
else {
p->itype = itype;
p->otype = otype;
p->scale = 1;
}
return spec;
}
#if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64)
#if defined __GNUC__ && defined __x86_64__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"cpuid \n\t" \
: "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined __GNUC__ && defined __i386__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"mov %%ebx, %%edi \n\t" \
"cpuid \n\t" \
"xchg %%edi, %%ebx \n\t" \
: "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500
void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type, 0); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_X64 && defined _MSC_VER
void __cpuidex(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm pushad \
__asm mov eax, type \
__asm xor ecx, ecx \
__asm cpuid \
__asm mov eax_, eax \
__asm mov ebx_, ebx \
__asm mov ecx_, ecx \
__asm mov edx_, edx \
__asm popad
#endif
#endif
#if WITH_CR32S && WITH_CR32
static bool cpu_has_simd32(void)
{
#if defined __x86_64__ || defined _M_X64
return true;
#elif defined __i386__ || defined _M_IX86
enum {SSE = 1 << 25, SSE2 = 1 << 26};
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
return (edx_ & (SSE|SSE2)) != 0;
#elif defined AV_CPU_FLAG_NEON
return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON);
#else
return false;
#endif
}
static bool should_use_simd32(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD32")))? !!atoi(e) : cpu_has_simd32();
}
#else
#define should_use_simd32() true
#endif
#if WITH_CR64S && WITH_CR64
#if defined __GNUC__
#define XGETBV(type, eax_, edx_) \
__asm__ __volatile__ ( \
".byte 0x0f, 0x01, 0xd0\n" \
: "=a"(eax_), "=d"(edx_) : "c" (type));
#elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219
#include <immintrin.h>
#define XGETBV(type, eax_, edx_) do { \
union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \
eax_ = a.y[0], edx_ = a.y[1]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define XGETBV(type, eax_, edx_) \
__asm pushad \
__asm mov ecx, type \
__asm _emit 0x0f \
__asm _emit 0x01 \
__asm _emit 0xd0 \
__asm mov eax_, eax \
__asm mov edx_, edx \
__asm popad
#else
#define XGETBV(type, eax_, edx_) eax_ = edx_ = 0
#endif
static bool cpu_has_simd64(void)
{
enum {OSXSAVE = 1 << 27, AVX = 1 << 28};
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) {
XGETBV(0, eax_, edx_);
return (eax_ & 6) == 6;
}
return false;
}
static bool should_use_simd64(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD64")))? !!atoi(e) : cpu_has_simd64();
}
#else
#define should_use_simd64() true
#endif
extern control_block_t
_soxr_rate32_cb,
_soxr_rate32s_cb,
_soxr_rate64_cb,
_soxr_rate64s_cb,
_soxr_vr32_cb;
static void runtime_num(char const * env_name,
int min, int max, unsigned * field)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
if (i >= min && i <= max)
*field = (unsigned)i;
}
}
static void runtime_flag(char const * env_name,
unsigned n_bits, unsigned n_shift, unsigned long * flags)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
unsigned long mask = (1UL << n_bits) - 1;
if (i >= 0 && i <= (int)mask)
*flags &= ~(mask << n_shift), *flags |= ((unsigned long)i << n_shift);
}
}
soxr_t soxr_create(
double input_rate, double output_rate,
unsigned num_channels,
soxr_error_t * error0,
soxr_io_spec_t const * io_spec,
soxr_quality_spec_t const * q_spec,
soxr_runtime_spec_t const * runtime_spec)
{
double io_ratio = output_rate!=0? input_rate!=0?
input_rate / output_rate : -1 : input_rate!=0? -1 : 0;
static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
soxr_t p = 0;
soxr_error_t error = 0;
#if WITH_DEV_TRACE
#define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x))
char const * e = getenv("SOXR_TRACE");
_soxr_trace_level = e? atoi(e) : 0;
{
static char const arch[] = {_(char), _(short), _(int), _(long), _(long long)
, ' ', _(float), _(double), _(long double)
, ' ', _(int *), _(int (*)(int))
, ' ', HAVE_BIGENDIAN ? 'B' : 'L'
#if defined _OPENMP
, ' ', 'O', 'M', 'P'
#endif
, 0};
#undef _
lsx_debug("arch: %s", arch);
}
#endif
if (q_spec && q_spec->e) error = q_spec->e;
else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
error = "invalid io datatype(s)";
if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed";
if (p) {
control_block_t * control_block;
p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);
if (q_spec) { /* Backwards compatibility with original API: */
if (p->q_spec.passband_end > 2)
p->q_spec.passband_end /= 100;
if (p->q_spec.stopband_begin > 2)
p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
}
p->io_ratio = io_ratio;
p->num_channels = num_channels;
if (io_spec)
p->io_spec = *io_spec;
else
p->io_spec.scale = 1;
p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);
runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size);
runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size);
runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes);
runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads);
runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags);
runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags);
runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags);
p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
datatype_full_scale[p->io_spec.itype & 3];
p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p;
#if WITH_CR32 || WITH_CR32S || WITH_VR32
if (0
#if WITH_VR32
|| ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))
#endif
#if WITH_CR32 || WITH_CR32S
|| !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
#endif
) {
p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
p->interleave = (interleave_t)_soxr_interleave_f;
control_block =
#if WITH_VR32
((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb :
#endif
#if WITH_CR32S
!WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb :
#endif
&_soxr_rate32_cb;
}
#if WITH_CR64 || WITH_CR64S
else
#endif
#endif
#if WITH_CR64 || WITH_CR64S
{
p->deinterleave = (deinterleave_t)_soxr_deinterleave;
p->interleave = (interleave_t)_soxr_interleave;
control_block =
#if WITH_CR64S
!WITH_CR64 || should_use_simd64()? &_soxr_rate64s_cb :
#endif
&_soxr_rate64_cb;
}
#endif
memcpy(&p->control_block, control_block, sizeof(p->control_block));
if (p->num_channels && io_ratio!=0)
error = soxr_set_io_ratio(p, io_ratio, 0);
}
if (error)
soxr_delete(p), p = 0;
if (error0)
*error0 = error;
return p;
}
/* Register an input callback for pull-mode operation (see soxr_output).
 * INPUT_FN_STATE is passed back verbatim to INPUT_FN; MAX_ILEN caps the
 * number of frames requested per callback (0 means "no limit").
 * Returns 0 on success, or an error string. */
soxr_error_t soxr_set_input_fn(soxr_t p,
    soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
  /* Bug fix: validate p, consistent with the other public setters
   * (soxr_set_num_channels, soxr_set_io_ratio). */
  if (!p) return "invalid soxr_t pointer";
  p->input_fn_state = input_fn_state;
  p->input_fn = input_fn;
  p->max_ilen = max_ilen? max_ilen : (size_t)-1;  /* 0 => unlimited */
  return 0;
}
/* Release everything owned by *p (per-channel resamplers, pointer table,
 * shared state) and zero the structure.  p itself is NOT freed here; that
 * is soxr_delete's job. */
static void soxr_delete0(soxr_t p)
{
  unsigned chan;
  if (p->resamplers) {
    for (chan = 0; chan < p->num_channels; ++chan) {
      void * r = p->resamplers[chan];
      if (r)
        resampler_close(r);
      free(r);
    }
  }
  free(p->resamplers);
  free(p->channel_ptrs);
  free(p->shared);
  memset(p, 0, sizeof(*p));
}
/* Report the resampler's current delay in output samples, as measured on
 * channel 0.  Returns 0 for a null, errored, or not-yet-initialised handle. */
double soxr_delay(soxr_t p)
{
  if (!p || p->error || !p->resamplers)
    return 0;
  return resampler_delay(p->resamplers[0]);
}
/* Tear down p's internals and make ERROR its sticky error.  soxr_delete0
 * zeroes *p, so the error must be recorded afterwards. */
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
  soxr_delete0(p);
  p->error = error;
  return error;
}
/* Allocate the per-instance buffers and create one resampler per channel.
 * Returns 0 on success.  On any failure the instance is torn down via
 * fatal_error() and the error string is returned. */
static soxr_error_t initialise(soxr_t p)
{
  size_t shared_size, channel_size;
  unsigned chan;

  resampler_sizes(&shared_size, &channel_size);
  p->channel_ptrs = calloc(p->num_channels, sizeof(*p->channel_ptrs));
  p->shared      = calloc(1, shared_size);
  p->resamplers  = calloc(p->num_channels, sizeof(*p->resamplers));
  if (!p->shared || !p->channel_ptrs || !p->resamplers)
    return fatal_error(p, "malloc failed");

  for (chan = 0; chan < p->num_channels; ++chan) {
    soxr_error_t err;
    p->resamplers[chan] = calloc(1, channel_size);
    if (!p->resamplers[chan])
      return fatal_error(p, "malloc failed");
    err = resampler_create(
        p->resamplers[chan],
        p->shared,
        p->io_ratio,
        &p->q_spec,
        &p->runtime_spec,
        p->io_spec.scale);
    if (err)
      return fatal_error(p, err);
  }
  return 0;
}
/* Change the channel count of a resampler that has not yet allocated its
 * per-channel state.  Setting the same count is a no-op that just reports
 * any pending error. */
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
  if (!p)
    return "invalid soxr_t pointer";
  if (num_channels == p->num_channels)
    return p->error;
  if (!num_channels)
    return "invalid # of channels";
  if (p->resamplers)
    return "# of channels can't be changed";
  p->num_channels = num_channels;
  /* Re-apply the ratio so initialisation can proceed with the new count. */
  return soxr_set_io_ratio(p, p->io_ratio, 0);
}
/* Set (or change) the input:output sample-rate ratio.  SLEW_LEN, when
 * non-zero, asks for a gradual transition over that many samples (only
 * honoured by engines that support a varying ratio).  The first call, made
 * before any buffers exist, performs full initialisation. */
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
unsigned i;
soxr_error_t error;
if (!p) return "invalid soxr_t pointer";
if ((error = p->error)) return error;
if (!p->num_channels) return "must set # channels before O/I ratio";
if (io_ratio <= 0) return "I/O ratio out-of-range";
/* Not yet initialised: record the ratio and build everything now. */
if (!p->channel_ptrs) {
p->io_ratio = io_ratio;
return initialise(p);
}
/* control_block[8] is the engine's set-io-ratio entry point; non-NULL
 * means the active engine can vary the ratio at runtime. */
if (p->control_block[8]) {
/* NOTE(review): `error' is necessarily 0 here (we returned above when
 * p->error was set), so the !error guard is vestigial. */
for (i = 0; !error && i < p->num_channels; ++i)
resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
return error;
}
/* Fixed-ratio engine: only an (effectively) identical ratio is accepted. */
return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
"varying O/I ratio is not supported with this quality level";
}
/* Destroy p entirely: release its internals, then the structure itself.
 * A NULL handle is a harmless no-op. */
void soxr_delete(soxr_t p)
{
  if (!p)
    return;
  soxr_delete0(p);
  free(p);
}
/* Discard all buffered/in-flight signal while preserving the configuration
 * (specs, channel count, I/O callback and its state).  Implemented by
 * tearing the instance down and re-seeding it from a saved copy. */
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
if (p) {
/* Snapshot the fields to survive, then wipe everything. */
struct soxr tmp = *p;
soxr_delete0(p);
/* Redundant: soxr_delete0 already zeroes *p (kept for safety). */
memset(p, 0, sizeof(*p));
p->input_fn = tmp.input_fn;
p->runtime_spec = tmp.runtime_spec;
p->q_spec = tmp.q_spec;
p->io_spec = tmp.io_spec;
p->num_channels = tmp.num_channels;
p->input_fn_state = tmp.input_fn_state;
memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
p->deinterleave = tmp.deinterleave;
p->interleave = tmp.interleave;
/* Optionally re-initialise immediately at the old ratio. */
return (p->q_spec.flags & RESET_ON_CLEAR)?
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0;
}
return "invalid soxr_t pointer";
}
/* Feed LEN frames of channel I from SRC into that channel's resampler,
 * converting from the user's input datatype to the internal sample format
 * (the "deinterleave" of a single channel is just a format conversion). */
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
(*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}
/* Accept LEN input frames from IN — split per channel or interleaved,
 * according to io_spec.itype — and deliver them to the per-channel
 * resamplers.  LEN == 0 marks end-of-input (starts flushing).
 * Returns the number of frames accepted; 0 on error. */
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
  bool separated;
  unsigned i;

  if (!p || p->error)
    return 0;
  /* Bug fix: previously p->io_spec was read (for the SOXR_SPLIT test)
   * before the NULL check above — a null-pointer dereference. */
  separated = !!(p->io_spec.itype & SOXR_SPLIT);
  if (!in && len) {
    p->error = "null input buffer pointer";
    return 0;
  }
  if (!len) {
    p->flushing = true;
    return 0;
  }
  if (separated)
    for (i = 0; i < p->num_channels; ++i)
      soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
  else {
    /* Interleaved: obtain each channel's destination, then deinterleave
     * all channels in one pass. */
    for (i = 0; i < p->num_channels; ++i)
      p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
    (*p->deinterleave)(
        (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels);
  }
  return len;
}
/* Produce up to LEN output frames from channel I's resampler.  If SEPARATED,
 * convert directly into DEST (with dither unless SOXR_NO_DITHER); otherwise
 * just record the internal buffer pointer for a later interleaving pass.
 * Returns the number of frames actually available. */
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
sample_t const * src;
if (p->flushing)
resampler_flush(p->resamplers[i]);
resampler_process(p->resamplers[i], len);
src = resampler_output(p->resamplers[i], NULL, &len); /* len updated to actual count */
if (separated)
p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
else p->channel_ptrs[i] = (void /* const */ *)src;
return len;
}
/* Drain up to LEN frames from every channel into OUT (split or interleaved
 * per io_spec.otype), optionally across OpenMP threads.  Returns the frame
 * count produced — taken from channel 0 on the parallel path, so all
 * channels are assumed to yield the same count. */
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
unsigned u;
size_t done = 0;
bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
int i;
/* num_threads == 0 means "auto": parallelise over channels.  Note the
 * braceless if — the omp-parallelised for IS its then-branch. */
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done1;
done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
if (!i)
done = done1;
} else
#endif
for (u = 0; u < p->num_channels; ++u)
/* NOTE(review): when !separated, ((soxr_bufs_t)out)[u] still reads OUT
 * as a pointer array even though the value is unused downstream —
 * confirm OUT is always large enough for this read. */
done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);
if (!separated)
p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs,
done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
return done;
}
/* Pull up to LEN0 output frames into OUT.  If an input callback was
 * registered via soxr_set_input_fn, loop: drain what is ready, then ask the
 * callback for more input, until LEN0 frames are produced or the source is
 * exhausted.  Returns the number of frames written (0 on error). */
size_t soxr_output(soxr_t p, void * out, size_t len0)
{
  size_t odone, odone0 = 0, olen = len0, osize, idone;
  size_t ilen;
  void const * in;
  bool was_flushing;

  if (!p || p->error)
    return 0;
  if (!out && len0) {
    p->error = "null output buffer pointer";
    return 0;
  }
  /* Bug fix: these reads of *p previously occurred before the NULL check. */
  ilen = min(p->max_ilen, (size_t)ceil((double)olen * p->io_ratio));
  in = out;   /* Set to != 0, so that the callback may leave it unset. */
  do {
    odone = soxr_output_no_callback(p, out, olen);
    odone0 += odone;
    if (odone0 == len0 || !p->input_fn || p->flushing)
      break;
    /* Advance the output pointer past what was just produced. */
    osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels;
    out = (char *)out + osize * odone;
    olen -= odone;
    /* Fetch more input; the callback may redirect `in'. */
    idone = p->input_fn(p->input_fn_state, &in, ilen);
    was_flushing = p->flushing;
    if (!in)
      p->error = "input function reported failure";
    else
      soxr_input(p, in, idone);
  } while (odone || idone || (!was_flushing && p->flushing));
  return odone0;
}
/* Upper bound on the input frames needed to produce OLEN output frames,
 * clamped to the caller-supplied ILEN. */
static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen)
{
  size_t needed;
#if 0
  if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING)
    needed = rate_i_for_o(p->resamplers[0], olen);
  else
#endif
    needed = (size_t)ceil((double)olen * p->io_ratio);
  return min(needed, ilen);
}
#if 0
static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen)
{
size_t result = (size_t)ceil((double)ilen / p->io_ratio);
return min(result, olen);
}
#endif
/* Push/pull entry point: feed up to ILEN0 input frames from IN and drain up
 * to OLEN output frames into OUT.  IN == NULL, or a ones'-complemented ILEN0
 * (i.e. (size_t)~n), requests a flush.  *IDONE0/*ODONE0 (either may be NULL)
 * receive the frames consumed/produced.  Returns the instance's error. */
soxr_error_t soxr_process(soxr_t p,
void const * in , size_t ilen0, size_t * idone0,
void * out, size_t olen , size_t * odone0)
{
size_t ilen, idone, odone = 0;
unsigned u;
bool flush_requested = false;
if (!p) return "null pointer";
if (!in)
flush_requested = true, ilen = ilen0 = 0;
else {
/* A "negative" length is the ~n flush-request encoding. */
if ((ptrdiff_t)ilen0 < 0)
flush_requested = true, ilen0 = ~ilen0;
/* NOTE(review): (1 || flush_requested) is always true, so the else
 * branch below is dead whenever idone0 is non-NULL. */
if (idone0 && (1 || flush_requested))
ilen = soxr_i_for_o(p, olen, ilen0);
else
ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/;
}
/* Flushing starts only once all offered input will be consumed. */
p->flushing |= ilen == ilen0 && flush_requested;
if (!out && !in)
idone = ilen;
else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */
#if defined _OPENMP
int i;
/* num_threads == 0 means "auto": one thread per channel.  Braceless if:
 * the omp-parallelised for is its then-branch. */
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done;
if (in)
soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen);
done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true);
if (!i)
odone = done; /* channel 0's count stands for all channels */
} else
#endif
for (u = 0; u < p->num_channels; ++u) {
if (in)
soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen);
odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true);
}
idone = ilen;
}
else {
/* Interleaved on at least one side: use the combined helpers. */
idone = ilen? soxr_input (p, in , ilen) : 0;
odone = soxr_output(p, out, olen);
}
if (idone0) *idone0 = idone;
if (odone0) *odone0 = odone;
return p->error;
}
/* One-call resampling of a single block of audio: create a resampler, feed
 * it ILEN input frames (with flush), drain up to OLEN output frames, then
 * destroy it.  Defaults to low quality when no quality spec is given. */
soxr_error_t soxr_oneshot(
    double irate, double orate,
    unsigned num_channels,
    void const * in , size_t ilen, size_t * idone,
    void * out, size_t olen, size_t * odone,
    soxr_io_spec_t const * io_spec,
    soxr_quality_spec_t const * q_spec,
    soxr_runtime_spec_t const * runtime_spec)
{
  soxr_t resampler = 0;
  soxr_error_t error;

  error = q_spec? q_spec->e : 0;
  if (!error) {
    soxr_quality_spec_t default_q;
    if (!q_spec) {
      default_q = soxr_quality_spec(SOXR_LQ, 0);  /* one-shot default: LQ */
      q_spec = &default_q;
    }
    resampler = soxr_create(irate, orate, num_channels,
        &error, io_spec, q_spec, runtime_spec);
  }
  if (!error) {
    /* ~ilen: ones'-complement length signals "this is all the input". */
    error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone);
    soxr_delete(resampler);
  }
  return error;
}
/* Associate ERROR with resampler P (or clear it with ERROR == 0).
 * NOTE(review): the guard below reads `!p->error', so it returns early
 * precisely when there is NO existing error and a different (non-null)
 * one is supplied — i.e. a new error appears to be silently dropped.
 * `p->error && ...' (don't overwrite an existing error) looks like the
 * plausible intent, but this matches released libsoxr sources; confirm
 * upstream before changing. */
soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error)
{
if (!p) return "null pointer";
if (!p->error && p->error != error) return p->error;
p->error = error;
return 0;
}
|
target_teams_distribute_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute foo
/* The bare directive must be followed by a for-loop; any other statement
   is diagnosed. */
void test_no_clause(void) {
int i;
#pragma omp target teams distribute
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute' must be a for loop}}
#pragma omp target teams distribute
++i;
}
/* Control flow may not enter or leave the OpenMP region: labels inside and
   outside the region are mutually invisible, and return is forbidden. */
void test_branch_protected_scope(void) {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
/* Unknown trailing tokens on the directive are ignored with a warning. */
void test_invalid_clause(void) {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute foo bar
for (i = 0; i < 16; ++i)
;
}
/* Stray punctuation after the directive or a clause is ignored with a
   warning rather than rejected. */
void test_non_identifiers(void) {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo(void);
/* Exercise the collapse clause: malformed parenthesisation, non-constant /
   non-positive arguments, and the required depth of the loop nest. */
void test_collapse(void) {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
#pragma omp target teams distribute collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
/* Exercise the private clause: malformed argument lists, non-variable
   arguments, and well-formed single/multiple variable lists. */
void test_private(void) {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
/* Exercise the lastprivate clause: malformed argument lists, non-variable
   arguments, and well-formed variable lists. */
void test_lastprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
/* Exercise the firstprivate clause, including the conflict with
   lastprivate on the same variable(s). */
void test_firstprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
/* The associated loop's iteration variable must be of integer or pointer
   type; float and double are rejected. */
void test_loop_messages(void) {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
pr58809.c | /* PR middle-end/58809 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O" } */
int i;
#pragma omp threadprivate (i)
/* Regression test for PR middle-end/58809: ICE on `copyin' of a
   threadprivate variable combined with a reduction on a _Complex type.
   Compiling without an ICE is the pass condition. */
void foo()
{
_Complex int j;
#pragma omp parallel copyin (i) reduction (&&:j)
;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval' values, normalising the
 * microsecond field via carry/borrow adjustments applied to *y (which is
 * therefore modified).  Returns 1 if the difference is negative, else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds so x's can't go negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Conversely, move excess microseconds in the difference into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalisation tv_usec of the result is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: run the tiled order-4, 25-point variable-coefficient
 * stencil TESTS times over an (Nx x Ny x Nz) grid for Nt time steps and
 * report the best wall-clock time.  Usage: prog Nx Ny Nz Nt */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Bug fix: Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) were read
   * uninitialized when arguments were missing; fail fast instead. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8;   /* +8: four ghost cells on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);     /* number of time steps */

  /* Two time planes of the solution grid, indexed A[t%2][z][y][x]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 axis-symmetric coefficient fields: centre + 4 per axis. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size list, terminated by -1 (consumed by the code generator). */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;

  /* Timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Deterministic pseudo-random initialisation.
   * NOTE(review): loops start at index 1, so plane/row/column 0 is left
   * uninitialised yet is read by the stencil's -4 offsets; preserved as-is
   * to keep benchmark behaviour identical — confirm upstream intent. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code (auto-generated tiled time-skewed loop nest;
     * kept verbatim — do not hand-edit the bounds). */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(16*t3+Nx+3,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),128*t4+126);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(512*t4,4*t5+4);
                    ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (including the top-level tables, which the
   * original leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
omp-simd-clone.c | /* OMP constructs' SIMD clone supporting code.
Copyright (C) 2005-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
#include "ipa-param-manipulation.h"
#include "tree-eh.h"
#include "varasm.h"
#include "stringpool.h"
#include "attribs.h"
#include "omp-simd-clone.h"
/* Return the number of elements in vector type VECTYPE, which is associated
with a SIMD clone. At present these always have a constant length. */
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  /* to_constant () requires the element count to be compile-time constant;
     per the comment above, SIMD-clone vector types always are.  */
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
of arguments to reserve space for. */
static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
struct cgraph_simd_clone *clone_info;
size_t len = (sizeof (struct cgraph_simd_clone)
+ nargs * sizeof (struct cgraph_simd_clone_arg));
clone_info = (struct cgraph_simd_clone *)
ggc_internal_cleared_alloc (len);
return clone_info;
}
/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO,
   including FROM->nargs - FROM->inbranch trailing argument entries.  */
static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
			struct cgraph_simd_clone *from)
{
  size_t nbytes = sizeof (struct cgraph_simd_clone);
  nbytes += ((from->nargs - from->inbranch)
	     * sizeof (struct cgraph_simd_clone_arg));
  memcpy (to, from, nbytes);
}
/* Fill an empty vector ARGS with parameter types of function FNDECL.
   Prefer the prototype's TYPE_ARG_TYPES; when that is absent, collect
   the DECL_ARGUMENTS and convert each PARM_DECL to its type.  */
static void
simd_clone_vector_of_formal_parm_types (vec<tree> *args, tree fndecl)
{
  tree fntype = TREE_TYPE (fndecl);
  if (TYPE_ARG_TYPES (fntype))
    {
      push_function_arg_types (args, fntype);
      return;
    }
  push_function_arg_decls (args, fndecl);
  for (unsigned int ix = 0; ix < args->length (); ix++)
    (*args)[ix] = TREE_TYPE ((*args)[ix]);
}
/* Given a simd function in NODE, extract the simd specific
   information from the OMP clauses passed in CLAUSES, and return
   the struct cgraph_simd_clone * if it should be cloned.  *INBRANCH_SPECIFIED
   is set to TRUE if the `inbranch' or `notinbranch' clause specified,
   otherwise set to FALSE.  Returns NULL (after a warning) when the
   clone should not be created: unusable linear step, or _Atomic
   qualified return/argument types.  */
static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
			    bool *inbranch_specified)
{
  auto_vec<tree> args;
  simd_clone_vector_of_formal_parm_types (&args, node->decl);
  tree t;
  int n;
  *inbranch_specified = false;
  n = args.length ();
  /* A prototyped parameter list is terminated by void_type_node,
     which is not a real argument.  */
  if (n > 0 && args.last () == void_type_node)
    n--;
  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;
  if (!clauses)
    goto out;
  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    goto out;
  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
	{
	case OMP_CLAUSE_INBRANCH:
	  clone_info->inbranch = 1;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_NOTINBRANCH:
	  clone_info->inbranch = 0;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_SIMDLEN:
	  clone_info->simdlen
	    = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
	  break;
	case OMP_CLAUSE_LINEAR:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    tree step = OMP_CLAUSE_LINEAR_STEP (t);
	    /* For declare simd, OMP_CLAUSE_DECL holds the argument
	       number rather than a decl.  */
	    int argno = TREE_INT_CST_LOW (decl);
	    if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
	      {
		/* Variable stride: linear_step stores the index of the
		   (uniform) argument supplying the step at run time.  */
		enum cgraph_simd_clone_arg_type arg_type;
		if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		  switch (OMP_CLAUSE_LINEAR_KIND (t))
		    {
		    case OMP_CLAUSE_LINEAR_REF:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_UVAL:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_VAL:
		    case OMP_CLAUSE_LINEAR_DEFAULT:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
		      break;
		    default:
		      gcc_unreachable ();
		    }
		else
		  arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
		clone_info->args[argno].arg_type = arg_type;
		clone_info->args[argno].linear_step = tree_to_shwi (step);
		/* The step-supplying argument index must itself be a
		   valid argument number.  */
		gcc_assert (clone_info->args[argno].linear_step >= 0
			    && clone_info->args[argno].linear_step < n);
	      }
	    else
	      {
		if (POINTER_TYPE_P (args[argno]))
		  step = fold_convert (ssizetype, step);
		if (!tree_fits_shwi_p (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring large linear step");
		    return NULL;
		  }
		else if (integer_zerop (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring zero linear step");
		    return NULL;
		  }
		else
		  {
		    /* Compile-time constant stride, stored directly in
		       linear_step.  */
		    enum cgraph_simd_clone_arg_type arg_type;
		    if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		      switch (OMP_CLAUSE_LINEAR_KIND (t))
			{
			case OMP_CLAUSE_LINEAR_REF:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_UVAL:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_VAL:
			case OMP_CLAUSE_LINEAR_DEFAULT:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
			  break;
			default:
			  gcc_unreachable ();
			}
		    else
		      arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
		    clone_info->args[argno].arg_type = arg_type;
		    clone_info->args[argno].linear_step = tree_to_shwi (step);
		  }
	      }
	    break;
	  }
	case OMP_CLAUSE_UNIFORM:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].arg_type
	      = SIMD_CLONE_ARG_TYPE_UNIFORM;
	    break;
	  }
	case OMP_CLAUSE_ALIGNED:
	  {
	    /* Ignore aligned (x) for declare simd, for the ABI we really
	       need an alignment specified.  */
	    if (OMP_CLAUSE_ALIGNED_ALIGNMENT (t) == NULL_TREE)
	      break;
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].alignment
	      = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
	    break;
	  }
	default:
	  break;
	}
    }
 out:
  if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
    {
      warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		  "ignoring %<#pragma omp declare simd%> on function "
		  "with %<_Atomic%> qualified return type");
      return NULL;
    }
  for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
    if (TYPE_ATOMIC (args[argno])
	&& clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
      {
	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		    "ignoring %<#pragma omp declare simd%> on function "
		    "with %<_Atomic%> qualified non-%<uniform%> argument");
	args.release ();
	return NULL;
      }
  return clone_info;
}
/* Given a SIMD clone in NODE, calculate the characteristic data
   type and return the corresponding type.  The characteristic data
   type is computed as described in the Intel Vector ABI:
   a) for a non-void function it is the return type;
   b) otherwise, the type of the first vector (non-uniform,
      non-linear) parameter;
   c) a pass-by-value struct/union/class type maps to int;
   d) if nothing applies, the type is int.
   (Rule e) — demoting 8/16-bit integers to int for Xeon Phi — is not
   implemented here.)  */
static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree fndecl = node->decl;
  tree rettype = TREE_TYPE (TREE_TYPE (fndecl));
  tree type = integer_type_node;
  if (TREE_CODE (rettype) != VOID_TYPE)
    /* Rule a): use the return type.  */
    type = rettype;
  else
    {
      /* Rule b): first parameter classified as a plain vector.  */
      auto_vec<tree> parm_types;
      simd_clone_vector_of_formal_parm_types (&parm_types, fndecl);
      for (unsigned int ix = 0; ix < clone_info->nargs; ++ix)
	if (clone_info->args[ix].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = parm_types[ix];
	    break;
	  }
    }
  /* Rule c): pass-by-value aggregates degrade to int.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;
  return type;
}
/* Produce the mangled name for a SIMD clone of NODE described by
   CLONE_INFO and return it as an identifier, or NULL_TREE if NODE
   already has a SIMD clone with the same mangled name.  The name is
   "_ZGV" <vecsize_mangle> <'M'|'N'> <simdlen> <one letter per
   argument, optionally followed by step/alignment digits> "_"
   <original assembler name>.  */
static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  /* 'M' = masked (inbranch) clone, 'N' = unmasked.  */
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;
  gcc_assert (vecsize_mangle && simdlen);
  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);
  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];
      switch (arg.arg_type)
	{
	case SIMD_CLONE_ARG_TYPE_UNIFORM:
	  pp_character (&pp, 'u');
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	  pp_character (&pp, 'l');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	  pp_character (&pp, 'R');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	  pp_character (&pp, 'L');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	  pp_character (&pp, 'U');
	  goto mangle_linear;
	mangle_linear:
	  /* Constant step: print nothing for the default step 1, the
	     value for larger steps, and 'n' plus the absolute value
	     for negative steps.  */
	  gcc_assert (arg.linear_step != 0);
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	  /* Variable step: linear_step holds the index of the argument
	     supplying the step, printed after the 's'.  */
	  pp_string (&pp, "ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	  pp_string (&pp, "Rs");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	  pp_string (&pp, "Ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  pp_string (&pp, "Us");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	default:
	  pp_character (&pp, 'v');
	}
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }
  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  /* NOTE(review): a leading '*' is skipped — presumably the marker for
     an already user-set assembler name; confirm against varasm.  */
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);
  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (id_equal (DECL_ASSEMBLER_NAME (clone->decl), str))
      return NULL_TREE;
  return get_identifier (str);
}
/* Create a simd clone of OLD_NODE and return it.  When OLD_NODE is a
   definition a full body clone is made; otherwise only the
   declaration is cloned.  Returns NULL if a gimple body is required
   but unavailable, or if the cgraph cloning itself fails.  */
static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      /* Clone declaration plus gimple body.  */
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
							   NULL, NULL,
							   "simdclone");
    }
  else
    {
      /* Declaration only: copy the decl by hand and register a
	 version clone for it.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name_numbered (old_decl,
							   "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      if (old_node->in_other_partition)
	new_node->in_other_partition = 1;
    }
  if (new_node == NULL)
    return new_node;
  /* The clone is never treated as a builtin, whatever the original
     decl was.  */
  set_decl_built_in_function (new_node->decl, NOT_BUILT_IN, 0);
  /* Propagate linkage/visibility attributes from the original decl.  */
  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
  DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
  DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
  DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
  DECL_VISIBILITY_SPECIFIED (new_node->decl)
    = DECL_VISIBILITY_SPECIFIED (old_node->decl);
  DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
  DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
  if (DECL_ONE_ONLY (old_node->decl))
    make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));
  /* The method cgraph_version_clone_with_body () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local = old_node->local;
  new_node->externally_visible = old_node->externally_visible;
  return new_node;
}
/* Adjust the return type of the given function to its appropriate
   vector counterpart.  Returns a simd array to be used throughout the
   function as a return value.  Returns NULL_TREE for void functions
   and for mere declarations (no body to rewrite).  */
static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;
  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  t = TREE_TYPE (TREE_TYPE (fndecl));
  /* Pick the hardware vector width (in bits) matching the scalar
     return type, then convert it to a lane count, capped at simdlen.  */
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  /* Pointers are vectorized as pointer-sized integers.  */
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen does not fit one hardware vector: return an array of
	 simdlen / veclen vectors.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  if (!node->definition)
    return NULL_TREE;
  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);
  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    /* Reinterpret the array-of-vectors result decl as an array of
       simdlen scalars.  */
    return build1 (VIEW_CONVERT_EXPR, atype, t);
  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}
/* Each vector argument has a corresponding array to be used locally
   as part of the eventual loop.  Create such a temporary array of
   SIMDLEN elements of TYPE, named after PREFIX, register it with the
   current function and return it.  */
static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  tree array_type = build_array_type_nelts (type, simdlen);
  tree var = create_tmp_var_raw (array_type, prefix);
  gimple_add_tmp_var (var);
  return var;
}
/* Modify the function argument types to their corresponding vector
   counterparts if appropriate.  Also, create one array for each simd
   argument to be used locally when using the function arguments as
   part of the loop.
   NODE is the function whose arguments are to be adjusted.
   If NODE does not represent function definition, returns NULL.  Otherwise
   returns an adjustment class that will be filled describing how the argument
   declarations will be remapped.  New arguments which are not to be remapped
   are marked with USER_FLAG.  */
static ipa_param_body_adjustments *
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  auto_vec<tree> args;
  /* For a definition operate on the PARM_DECLs; for a mere
     declaration only the parameter types are available.  */
  if (node->definition)
    push_function_arg_decls (&args, node->decl);
  else
    simd_clone_vector_of_formal_parm_types (&args, node->decl);
  struct cgraph_simd_clone *sc = node->simdclone;
  vec<ipa_adjusted_param, va_gc> *new_params = NULL;
  vec_safe_reserve (new_params, sc->nargs);
  unsigned i, j, veclen;
  for (i = 0; i < sc->nargs; ++i)
    {
      ipa_adjusted_param adj;
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.prev_clone_index = i;
      sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
      sc->args[i].orig_type = parm_type;
      switch (sc->args[i].arg_type)
	{
	default:
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARAM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  /* The parameter itself is copied unchanged, but the
	     pointed-to values get a local simd array.  */
	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       TREE_TYPE (parm_type),
				       sc->simdlen);
	  adj.op = IPA_PARAM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	case SIMD_CLONE_ARG_TYPE_VECTOR:
	  /* Choose the vector width for this argument's scalar type,
	     expressed as a lane count capped at simdlen.  */
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = sc->vecsize_int;
	  else
	    veclen = sc->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
	  if (veclen > sc->simdlen)
	    veclen = sc->simdlen;
	  adj.op = IPA_PARAM_OP_NEW;
	  adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
	  /* Pointers are passed as vectors of pointer-sized ints.  */
	  if (POINTER_TYPE_P (parm_type))
	    adj.type = build_vector_type (pointer_sized_int_node, veclen);
	  else
	    adj.type = build_vector_type (parm_type, veclen);
	  sc->args[i].vector_type = adj.type;
	  /* If one hardware vector cannot hold simdlen lanes, the
	     argument becomes simdlen / veclen vector parameters; all
	     but the first carry user_flag so they are not remapped.  */
	  for (j = veclen; j < sc->simdlen; j += veclen)
	    {
	      vec_safe_push (new_params, adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARAM_OP_NEW;
		  adj.user_flag = 1;
		  adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
		  adj.base_index = i;
		  adj.prev_clone_index = i;
		  adj.type = sc->args[i].vector_type;
		}
	    }
	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (DECL_NAME (parm)
				       ? IDENTIFIER_POINTER (DECL_NAME (parm))
				       : NULL, parm_type, sc->simdlen);
	}
      vec_safe_push (new_params, adj);
    }
  if (sc->inbranch)
    {
      /* In-branch clones get a trailing mask argument.  */
      tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
      ipa_adjusted_param adj;
      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARAM_OP_NEW;
      adj.user_flag = 1;
      adj.param_prefix_index = IPA_PARAM_PREFIX_MASK;
      adj.base_index = i;
      adj.prev_clone_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = sc->vecsize_int;
      else
	veclen = sc->vecsize_float;
      veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type));
      if (veclen > sc->simdlen)
	veclen = sc->simdlen;
      if (sc->mask_mode != VOIDmode)
	/* Target uses an integer mask mode: pass the mask as an
	   integer of that mode.  */
	adj.type
	  = lang_hooks.types.type_for_mode (sc->mask_mode, 1);
      else if (POINTER_TYPE_P (base_type))
	adj.type = build_vector_type (pointer_sized_int_node, veclen);
      else
	adj.type = build_vector_type (base_type, veclen);
      vec_safe_push (new_params, adj);
      for (j = veclen; j < sc->simdlen; j += veclen)
	vec_safe_push (new_params, adj);
      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      sc->nargs++;
      if (sc->mask_mode != VOIDmode)
	base_type = boolean_type_node;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  if (sc->mask_mode == VOIDmode)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	  else if (veclen < sc->simdlen)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", adj.type, sc->simdlen / veclen);
	  else
	    sc->args[i].simd_array = NULL_TREE;
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }
  if (node->definition)
    {
      /* With a body, actually rewrite the parameter declarations.  */
      ipa_param_body_adjustments *adjustments
	= new ipa_param_body_adjustments (new_params, node->decl);
      adjustments->modify_formal_parameters ();
      return adjustments;
    }
  else
    {
      /* Declaration only: rebuild TYPE_ARG_TYPES by hand from the
	 adjustment vector, preserving a trailing void terminator.  */
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;
      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = vec_safe_length (new_params);
      for (i = 0; i < j; i++)
	{
	  struct ipa_adjusted_param *adj = &(*new_params)[i];
	  tree ptype;
	  if (adj->op == IPA_PARAM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}
      TYPE_ARG_TYPES (TREE_TYPE (node->decl)) = new_reversed;
      return NULL;
    }
}
/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_param_body_adjustments *adjustments)
{
  gimple_seq seq = NULL;
  /* I indexes simdclone->args, J indexes the adjustment vector; they
     diverge when one logical argument was split into several vector
     parameters.  */
  unsigned i = 0, j = 0, k;
  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      /* Copied (scalar) arguments and pointers need no simd array.  */
      if ((*adjustments->m_adj_params)[j].op == IPA_PARAM_OP_COPY
	  || POINTER_TYPE_P (TREE_TYPE (arg)))
	continue;
      node->simdclone->args[i].vector_arg = arg;
      tree array = node->simdclone->args[i].simd_array;
      if (node->simdclone->mask_mode != VOIDmode
	  && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
	{
	  /* Integer-mode mask: store each mask parameter into its slot
	     of the mask array, if one was created.  */
	  if (array == NULL_TREE)
	    continue;
	  unsigned int l
	    = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
	  for (k = 0; k <= l; k++)
	    {
	      if (k)
		{
		  /* Each further slot comes from the next parameter.  */
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
			       array, size_int (k), NULL, NULL);
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	  continue;
	}
      if (simd_clone_subparts (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  /* One vector argument covers the whole array: store it with
	     a single vector-typed MEM_REF at offset 0.  */
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  /* The argument was split over several vector parameters;
	     store each vector at its byte offset within the array.  */
	  unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree elemtype = TREE_TYPE (TREE_TYPE (arg));
	      elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
/* Callback info for ipa_simd_modify_stmt_ops below.  */
struct modify_stmt_info {
  /* Parameter adjustments queried for replacements.  */
  ipa_param_body_adjustments *adjustments;
  /* Statement currently being rewritten.  */
  gimple *stmt;
  /* For PHIs: last statement inserted for this PHI's arguments, so
     later insertions can be placed after it.  */
  gimple *after_stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};
/* Callback for walk_gimple_op.
   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  DATA is a walk_stmt_info
   whose info field points at a modify_stmt_info.  */
static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  /* Look through ADDR_EXPR and part-extraction wrappers so a
     PARM_DECL underneath can be found.  */
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  if (TREE_CODE (*tp) == BIT_FIELD_REF
      || TREE_CODE (*tp) == IMAGPART_EXPR
      || TREE_CODE (*tp) == REALPART_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  tree repl = NULL_TREE;
  ipa_param_body_replacement *pbr = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    {
      pbr = info->adjustments->get_expr_replacement (*tp, true);
      if (pbr)
	repl = pbr->repl;
    }
  else if (TYPE_P (*tp))
    /* Types contain no parameter uses to rewrite.  */
    *walk_subtrees = 0;
  if (repl)
    repl = unshare_expr (repl);
  else
    {
      if (tp != orig_tp)
	{
	  /* We looked through a wrapper but found no direct
	     replacement; walk the inner expression, and if anything in
	     it was modified, treat the rewritten inner expression as
	     the replacement for the wrapped operand.  */
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }
  if (tp != orig_tp)
    {
      /* Replacing underneath an ADDR_EXPR: the address of the
	 replacement must be materialized in a separate statement.  */
      if (gimple_code (info->stmt) == GIMPLE_PHI
	  && pbr
	  && TREE_CODE (*orig_tp) == ADDR_EXPR
	  && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL
	  && pbr->dummy)
	{
	  /* Reuse the SSA_NAME cached by a previous PHI argument.  */
	  gcc_assert (TREE_CODE (pbr->dummy) == SSA_NAME);
	  *orig_tp = pbr->dummy;
	  info->modified = true;
	  return NULL_TREE;
	}
      repl = build_fold_addr_expr (repl);
      gimple *stmt;
      if (is_gimple_debug (info->stmt))
	{
	  /* For debug statements bind a DEBUG_EXPR_DECL instead of
	     emitting a real computation.  */
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi;
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	{
	  /* Statements cannot be inserted before a PHI; place them at
	     the start of the entry successor block, after any already
	     inserted for this PHI.  */
	  if (info->after_stmt)
	    gsi = gsi_for_stmt (info->after_stmt);
	  else
	    gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  /* Cache SSA_NAME for next time.  */
	  if (pbr
	      && TREE_CODE (*orig_tp) == ADDR_EXPR
	      && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL)
	    {
	      gcc_assert (!pbr->dummy);
	      pbr->dummy = repl;
	    }
	}
      else
	gsi = gsi_for_stmt (info->stmt);
      if (info->after_stmt)
	gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      else
	gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	info->after_stmt = stmt;
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      /* Types differ; reinterpret the replacement through a
	 VIEW_CONVERT_EXPR.  */
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;
  info->modified = true;
  return NULL_TREE;
}
/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.
   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_param_body_adjustments *adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j;
  /* Register replacements for every function argument use to an offset into
     the corresponding simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg
	  || (*adjustments->m_adj_params)[j].user_flag)
	continue;
      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      /* The replacement expression is simd_array[iter].  */
      tree r = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array,
		       iter, NULL_TREE, NULL_TREE);
      adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r);
      /* Skip the extra adjustment entries of an argument that was
	 split across several vector parameters.  */
      if (simd_clone_subparts (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
    }
  /* Retarget SSA names whose underlying var was a replaced
     PARM_DECL.  */
  tree name;
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      tree base_var;
      if (SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
	  && (base_var
	      = adjustments->get_replacement_ssa_base (SSA_NAME_VAR (name))))
	{
	  if (SSA_NAME_IS_DEFAULT_DEF (name))
	    {
	      /* Default defs become explicit loads from the simd array
		 at the start of the entry successor block.  */
	      tree old_decl = SSA_NAME_VAR (name);
	      bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
	      tree repl = adjustments->lookup_replacement (old_decl, 0);
	      gcc_checking_assert (repl);
	      repl = unshare_expr (repl);
	      set_ssa_default_def (cfun, old_decl, NULL_TREE);
	      SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      SSA_NAME_IS_DEFAULT_DEF (name) = 0;
	      gimple *stmt = gimple_build_assign (name, repl);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  else
	    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	}
    }
  struct modify_stmt_info info;
  info.adjustments = adjustments;
  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;
      /* First rewrite PHI arguments ...  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
	  int i, n = gimple_phi_num_args (phi);
	  info.stmt = phi;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  for (i = 0; i < n; ++i)
	    {
	      int walk_subtrees = 1;
	      tree arg = gimple_phi_arg_def (phi, i);
	      tree op = arg;
	      ipa_simd_modify_stmt_ops (&op, &walk_subtrees, &wi);
	      if (op != arg)
		{
		  SET_PHI_ARG_DEF (phi, i, op);
		  gcc_assert (TREE_CODE (op) == SSA_NAME);
		  if (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL)
		    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op) = 1;
		}
	    }
	}
      /* ... then all remaining statements.  */
      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
	  if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
	    {
	      tree retval = gimple_return_retval (return_stmt);
	      edge e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
	      e->flags |= EDGE_FALLTHRU;
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}
	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }
	  if (info.modified)
	    {
	      update_stmt (stmt);
	      /* If the above changed the var of a debug bind into something
		 different, remove the debug stmt.  We could also for all the
		 replaced parameters add VAR_DECLs for debug info purposes,
		 add debug stmts for those to be the simd array accesses and
		 replace debug stmt var operand with that var.  Debugging of
		 vectorized loops doesn't work too well, so don't bother for
		 now.  */
	      if ((gimple_debug_bind_p (stmt)
		   && !DECL_P (gimple_debug_bind_get_var (stmt)))
		  || (gimple_debug_source_bind_p (stmt)
		      && !DECL_P (gimple_debug_source_bind_get_var (stmt))))
		{
		  gsi_remove (&gsi, true);
		  continue;
		}
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
/* Helper function of simd_clone_adjust, return linear step addend
   of Ith argument, as a tree of type ADDTYPE.  Constant steps yield a
   constant; variable steps load the step from the designated uniform
   argument, inserting any needed statements at the start of
   ENTRY_BB.  */
static tree
simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
			  tree addtype, basic_block entry_bb)
{
  tree ptype = NULL_TREE;
  switch (node->simdclone->args[i].arg_type)
    {
    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
      /* Constant steps are encoded directly in linear_step.  */
      return build_int_cst (addtype, node->simdclone->args[i].linear_step);
    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
      ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      break;
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
      ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
      break;
    default:
      gcc_unreachable ();
    }
  /* For variable steps, linear_step holds the index of the argument
     supplying the step value.  */
  unsigned int idx = node->simdclone->args[i].linear_step;
  tree arg = node->simdclone->args[idx].orig_arg;
  gcc_assert (is_gimple_reg_type (TREE_TYPE (arg)));
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  gimple *g;
  tree ret;
  if (is_gimple_reg (arg))
    ret = get_or_create_ssa_default_def (cfun, arg);
  else
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  /* A reference-typed step argument must be dereferenced first.  */
  if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))),
			       build_simple_mem_ref (ret));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  if (!useless_type_conversion_p (addtype, TREE_TYPE (ret)))
    {
      g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  if (POINTER_TYPE_P (ptype))
    {
      /* Pointer arguments advance by step * sizeof (*pointer).  */
      tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
      if (size && TREE_CODE (size) == INTEGER_CST)
	{
	  g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
				   ret, fold_convert (addtype, size));
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  ret = gimple_assign_lhs (g);
	}
    }
  return ret;
}
/* Adjust the argument types in NODE to their appropriate vector
counterparts. */
static void
simd_clone_adjust (struct cgraph_node *node)
{
  /* Rewrite the body of the simd clone NODE: wrap the scalar body in a
     simdlen-trip loop over the per-lane SIMD arrays, honor the inbranch
     mask, and patch up uniform/linear/aligned arguments.  */
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  TREE_TYPE (node->decl) = build_distinct_type_copy (TREE_TYPE (node->decl));
  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_param_body_adjustments *adjustments
    = simd_clone_adjust_argument_types (node);
  gcc_assert (adjustments);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter);
  tree iter2 = NULL_TREE;
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);
  delete adjustments;

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  gimple *g;
  basic_block incr_bb = NULL;
  class loop *loop = NULL;

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
    {
      basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
      incr_bb = create_empty_bb (orig_exit);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
      /* Funnel every edge into the exit block through incr_bb instead,
	 accumulating the profile counts as we go.  */
      while (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
	{
	  edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	  redirect_edge_succ (e, incr_bb);
	  incr_bb->count += e->count ();
	}
    }
  else if (node->simdclone->inbranch)
    {
      /* No predecessors of the exit block (e.g. a noreturn body), but an
	 inbranch clone still needs the mask-skip target block.  */
      incr_bb = create_empty_bb (entry_bb);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
    }

  if (incr_bb)
    {
      make_single_succ_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
      gsi = gsi_last_bb (incr_bb);
      /* iter2 = iter1 + 1 -- the lane counter increment.  */
      iter2 = make_ssa_name (iter);
      g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
			       build_int_cst (unsigned_type_node, 1));
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

      /* Mostly annotate the loop for the vectorizer (the rest is done
	 below).  */
      loop = alloc_loop ();
      cfun->has_force_vectorize_loops = true;
      loop->safelen = node->simdclone->simdlen;
      loop->force_vectorize = true;
      loop->header = body_bb;
    }

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask;
      if (node->simdclone->mask_mode != VOIDmode)
	{
	  /* Integer bitmask mask argument: extract bit iter1 (possibly
	     from the right element of a mask array first).  */
	  tree shift_cnt;
	  if (mask_array == NULL_TREE)
	    {
	      tree arg = node->simdclone->args[node->simdclone->nargs
					       - 1].vector_arg;
	      mask = get_or_create_ssa_default_def (cfun, arg);
	      shift_cnt = iter1;
	    }
	  else
	    {
	      /* Multiple mask words: idx = iter1 >> log2 (lanes per word),
		 shift_cnt = iter1 & (lanes per word - 1).  */
	      tree maskt = TREE_TYPE (mask_array);
	      int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt)));
	      c = node->simdclone->simdlen / (c + 1);
	      int s = exact_log2 (c);
	      gcc_assert (s > 0);
	      c--;
	      tree idx = make_ssa_name (TREE_TYPE (iter1));
	      g = gimple_build_assign (idx, RSHIFT_EXPR, iter1,
				       build_int_cst (NULL_TREE, s));
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	      mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
	      tree aref = build4 (ARRAY_REF,
				  TREE_TYPE (TREE_TYPE (mask_array)),
				  mask_array, idx, NULL, NULL);
	      g = gimple_build_assign (mask, aref);
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	      shift_cnt = make_ssa_name (TREE_TYPE (iter1));
	      g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1,
				       build_int_cst (TREE_TYPE (iter1), c));
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	    }
	  /* mask = (mask >> shift_cnt) & 1.  */
	  g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
				   RSHIFT_EXPR, mask, shift_cnt);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  mask = gimple_assign_lhs (g);
	  g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
				   BIT_AND_EXPR, mask,
				   build_int_cst (TREE_TYPE (mask), 1));
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  mask = gimple_assign_lhs (g);
	}
      else
	{
	  /* Per-lane mask array: load element iter1, view-converting a
	     non-integral element to a same-sized unsigned integer.  */
	  mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
	  tree aref = build4 (ARRAY_REF,
			      TREE_TYPE (TREE_TYPE (mask_array)),
			      mask_array, iter1, NULL, NULL);
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref)));
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	    {
	      aref = build1 (VIEW_CONVERT_EXPR,
			     build_nonstandard_integer_type (bitsize, 0),
			     mask);
	      mask = make_ssa_name (TREE_TYPE (aref));
	      g = gimple_build_assign (mask, aref);
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	    }
	}

      /* If the lane's mask bit is zero, skip the body for this lane.  */
      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      e->probability = profile_probability::unlikely ().guessed ();
      incr_bb->count += e->count ();
      edge fallthru = FALLTHRU_EDGE (loop->header);
      fallthru->flags = EDGE_FALSE_VALUE;
      fallthru->probability = profile_probability::likely ().guessed ();
    }

  basic_block latch_bb = NULL;
  basic_block new_exit_bb = NULL;

  /* Generate the condition.  */
  if (incr_bb)
    {
      /* if (iter2 < simdlen) continue the lane loop, else exit.  */
      gsi = gsi_last_bb (incr_bb);
      g = gimple_build_cond (LT_EXPR, iter2,
			     build_int_cst (unsigned_type_node,
					    node->simdclone->simdlen),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = split_block (incr_bb, gsi_stmt (gsi));
      latch_bb = e->dest;
      new_exit_bb = split_block_after_labels (latch_bb)->dest;
      loop->latch = latch_bb;

      redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

      edge new_e = make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);

      /* FIXME: Do we need to distribute probabilities for the conditional? */
      new_e->probability = profile_probability::guessed_never ();
      /* The successor of incr_bb is already pointing to latch_bb; just
	 change the flags.
	 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
      FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
    }

  /* PHI for the lane counter: 0 on entry, iter2 from the latch.  */
  gphi *phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = NULL;
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  if (incr_bb)
    {
      latch_edge = single_succ_edge (latch_bb);
      add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

      /* Generate the new return.  */
      gsi = gsi_last_bb (new_exit_bb);
      if (retval
	  && TREE_CODE (retval) == VIEW_CONVERT_EXPR
	  && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
	retval = TREE_OPERAND (retval, 0);
      else if (retval)
	{
	  retval = build1 (VIEW_CONVERT_EXPR,
			   TREE_TYPE (TREE_TYPE (node->decl)),
			   retval);
	  retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					     false, GSI_CONTINUE_LINKING);
	}
      g = gimple_build_return (retval);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
    }

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
	    || !is_gimple_reg_type
			(TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
	/* Addressable / non-register uniform argument: snapshot it into a
	   temporary at entry and restore it at the top of the body.  */
	tree orig_arg = node->simdclone->args[i].orig_arg;
	if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
	  iter1 = make_ssa_name (TREE_TYPE (orig_arg));
	else
	  {
	    iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
	    gimple_add_tmp_var (iter1);
	  }
	gsi = gsi_after_labels (entry_bb);
	g = gimple_build_assign (iter1, orig_arg);
	gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	gsi = gsi_after_labels (body_bb);
	g = gimple_build_assign (orig_arg, iter1);
	gsi_insert_before (&gsi, g, GSI_NEW_STMT);
      }
    else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	     && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
	     && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
		== REFERENCE_TYPE
	     && TREE_ADDRESSABLE
		  (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
	/* Uniform passed by reference to an addressable type: snapshot
	   and restore the referenced object instead.  */
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
	    gimple_add_tmp_var (iter1);
	    gsi = gsi_after_labels (entry_bb);
	    g = gimple_build_assign (iter1, build_simple_mem_ref (def));
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	    gsi = gsi_after_labels (body_bb);
	    g = gimple_build_assign (build_simple_mem_ref (def), iter1);
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	  }
      }
    else if (node->simdclone->args[i].alignment
	     && node->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_UNIFORM
	     && (node->simdclone->args[i].alignment
		 & (node->simdclone->args[i].alignment - 1)) == 0
	     && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
		== POINTER_TYPE)
      {
	/* Aligned clause on a power-of-two alignment: route all uses of
	   the pointer through __builtin_assume_aligned.  */
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gcall *call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg);
		g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    node->create_edge (cgraph_node::get_create (fn),
			       call, entry_bb->count);

	    /* Replace every non-debug use of the old default def (except
	       the call itself) with the aligned pointer.  */
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if ((node->simdclone->args[i].arg_type
	      == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
      {
	/* Linear argument: build a PHI that starts at the incoming value
	   and is advanced by the linear step in incr_bb each lane.  */
	tree orig_arg = node->simdclone->args[i].orig_arg;
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	tree def = NULL_TREE;
	if (TREE_ADDRESSABLE (orig_arg))
	  {
	    def = make_ssa_name (TREE_TYPE (orig_arg));
	    iter1 = make_ssa_name (TREE_TYPE (orig_arg));
	    if (incr_bb)
	      iter2 = make_ssa_name (TREE_TYPE (orig_arg));
	    gsi = gsi_after_labels (entry_bb);
	    g = gimple_build_assign (def, orig_arg);
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	  }
	else
	  {
	    def = ssa_default_def (cfun, orig_arg);
	    if (!def || has_zero_uses (def))
	      def = NULL_TREE;
	    else
	      {
		iter1 = make_ssa_name (orig_arg);
		if (incr_bb)
		  iter2 = make_ssa_name (orig_arg);
	      }
	  }
	if (def)
	  {
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
		enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				      ? PLUS_EXPR : POINTER_PLUS_EXPR;
		tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			       ? TREE_TYPE (orig_arg) : sizetype;
		tree addcst = simd_clone_linear_addend (node, i, addtype,
							entry_bb);
		gsi = gsi_last_bb (incr_bb);
		g = gimple_build_assign (iter2, code, iter1, addcst);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    if (TREE_ADDRESSABLE (orig_arg))
	      {
		gsi = gsi_after_labels (body_bb);
		g = gimple_build_assign (orig_arg, iter1);
		gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	      }
	    else
	      FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
		if (use_stmt == phi)
		  continue;
		else
		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		    SET_USE (use_p, iter1);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
      {
	/* Linear "uval" (reference whose pointee is linear): iterate both
	   a pointer into the simd array and the loaded value, and store
	   the final value back through the reference on exit.  */
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (!TREE_ADDRESSABLE (orig_arg)
		    && TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
	if (def && !has_zero_uses (def))
	  {
	    tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
	    iter1 = make_ssa_name (orig_arg);
	    if (incr_bb)
	      iter2 = make_ssa_name (orig_arg);
	    tree iter3 = make_ssa_name (rtype);
	    tree iter4 = make_ssa_name (rtype);
	    tree iter5 = incr_bb ? make_ssa_name (rtype) : NULL_TREE;
	    gsi = gsi_after_labels (entry_bb);
	    gimple *load
	      = gimple_build_assign (iter3, build_simple_mem_ref (def));
	    gsi_insert_before (&gsi, load, GSI_NEW_STMT);
	    tree array = node->simdclone->args[i].simd_array;
	    TREE_ADDRESSABLE (array) = 1;
	    tree ptr = build_fold_addr_expr (array);
	    /* Pointer PHI walking through the simd array element by
	       element.  */
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
		g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
					 TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
		gsi = gsi_last_bb (incr_bb);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	    /* Value PHI advanced by the linear step.  */
	    phi = create_phi_node (iter4, body_bb);
	    add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
		enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
				      ? PLUS_EXPR : POINTER_PLUS_EXPR;
		tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
			       ? TREE_TYPE (iter3) : sizetype;
		tree addcst = simd_clone_linear_addend (node, i, addtype,
							entry_bb);
		g = gimple_build_assign (iter5, code, iter4, addcst);
		gsi = gsi_last_bb (incr_bb);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	    g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
	    gsi = gsi_after_labels (body_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == load)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	    /* For a writable pointee, copy the lane-0 result back through
	       the original reference at function exit.  */
	    if (!TYPE_READONLY (rtype) && incr_bb)
	      {
		tree v = make_ssa_name (rtype);
		tree aref = build4 (ARRAY_REF, rtype, array,
				    size_zero_node, NULL_TREE,
				    NULL_TREE);
		gsi = gsi_after_labels (new_exit_bb);
		g = gimple_build_assign (v, aref);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
		g = gimple_build_assign (build_simple_mem_ref (def), v);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	  }
      }

  calculate_dominance_info (CDI_DOMINATORS);
  if (loop)
    add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}
/* If the function in NODE is tagged as an elemental SIMD function,
create the appropriate SIMD clones. */
void
expand_simd_clones (struct cgraph_node *node)
{
  /* For each "omp declare simd" attribute on NODE, create all requested
     SIMD clones (per ISA variant, and inbranch/notinbranch unless the
     clause pinned one of them) and adjust their types and bodies.  */
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, there we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  /* Call this before creating clone_info, as it might ggc_collect.  */
  if (node->definition && node->has_gimple_body_p ())
    node->get_body ();

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or higher
	 count of ISA variants.  In that case, clone_info is initialized
	 for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  /* Odd iterations are the inbranch variants; skip them if the
	     clause already fixed the branching behavior.  */
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  /* Only when we are sure we want to create the clone actually
	     clone the function (or definitions) or create another
	     extern FUNCTION_DECL (for prototypes without definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  /* Link the new clone into NODE's circular-ish simd clone list
	     (prev_clone of the head points at the tail).  */
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      TREE_TYPE (n->decl)
		= build_distinct_type_copy (TREE_TYPE (n->decl));
	      targetm.simd_clone.adjust (n);
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
/* Entry point for IPA simd clone creation pass. */
/* Entry point for the IPA simd clone creation pass: expand simd clones
   for every function in the symbol table.  Returns no TODO flags.  */
static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}
namespace {

/* Pass descriptor for the "simdclone" simple IPA pass.  */
const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_OMP,			/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

/* The pass itself: delegates all work to ipa_omp_simd_clone.  */
class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

/* Run only on targets that implement the simd clone hooks.  */
bool
pass_omp_simd_clone::gate (function *)
{
  return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}

} // anon namespace

/* Factory used by the pass manager to instantiate the pass.  */
simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
|
create_thread.c | #include <stdio.h>
#include "omp.h"
/*
Basado en el tutorial:
http://openmp.org/mp-documents/omp-hands-on-SC08.pdf
*/
void print_square(int n);
/* Spawn 8 OpenMP threads; each prints the square of its own thread id.
   Returns 0 on success.  `void main` is not a standard C signature
   (C11 5.1.2.2.1) -- main must return int.  */
int main(void){
    /* Request 8 threads for subsequent parallel regions.  */
    omp_set_num_threads(8);
    #pragma omp parallel
    {
        /* Each thread reads its own id inside the region.  */
        int id = omp_get_thread_num();
        print_square(id);
    }
    return 0;
}
/* Print the square of N followed by a newline to stdout.  */
void print_square(int n){
    int squared = n * n;
    printf("%d\n", squared);
}
|
critical.c | /*
* critical.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
/* Archer/TSan regression test: 8 threads increment a shared counter
   inside a critical section, which must be race-free.  Returns nonzero
   if the final count is wrong (i.e. the critical section failed).  */
int main(int argc, char *argv[]) {
  int var = 0;

#pragma omp parallel num_threads(8) shared(var)
  {
    /* Serialized increment -- exactly one thread at a time.  */
#pragma omp critical
    { var++; }
  }
  /* The CHECK lines below expect this exact marker on stderr.  */
  fprintf(stderr, "DONE\n");
  int error = (var != 8);
  return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
thread_info.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_UTIL_THREAD_INFO_H_
#define CORE_UTIL_THREAD_INFO_H_
#include <omp.h>
#include <sched.h>
#include <atomic>
#include <vector>
#include "core/util/log.h"
#include "core/util/numa.h"
namespace bdm {
/// \brief This class stores information about each thread. (e.g. to which NUMA
/// node it belongs to.)
/// NB: Threads **must** be bound to CPUs using `OMP_PROC_BIND=true`.
class ThreadInfo {
public:
static ThreadInfo* GetInstance() {
static ThreadInfo kInstance;
return &kInstance;
}
// FIXME add test
int GetMyThreadId() const { return omp_get_thread_num(); }
// FIXME add test
int GetMyNumaNode() const { return GetNumaNode(GetMyThreadId()); }
/// Return the numa thread id of an openmp thread.
int GetMyNumaThreadId() const { return GetNumaThreadId(GetMyThreadId()); }
/// Returns the number of NUMA nodes on this machine
int GetNumaNodes() const { return numa_nodes_; }
/// Returns the numa node the given openmp thread is bound to.
int GetNumaNode(int omp_thread_id) const {
return thread_numa_mapping_[omp_thread_id];
}
/// Returns the number of threads in a given NUMA node.
int GetThreadsInNumaNode(int numa_node) const {
return threads_in_numa_[numa_node];
}
/// Return the numa thread id of an openmp thread.
int GetNumaThreadId(int omp_thread_id) const {
return numa_thread_id_[omp_thread_id];
}
/// Return the maximum number of threads.
int GetMaxThreads() const { return max_threads_; }
/// Returns a unique thread id even for parallel regions that
/// don't use OpenMP.
uint64_t GetUniversalThreadId() const;
uint64_t GetMaxUniversalThreadId() const { return thread_counter_; }
/// Renews the metadata.\n
/// Whenever a thread is scheduled on a different cpu, e.g. using
/// `numa_run_on_node`, `Renew()` must be called to update the thread
/// metadata.
void Renew() {
max_threads_ = omp_get_max_threads();
numa_nodes_ = numa_num_configured_nodes();
thread_numa_mapping_.clear();
numa_thread_id_.clear();
threads_in_numa_.clear();
thread_numa_mapping_.resize(max_threads_, 0);
numa_thread_id_.resize(max_threads_, 0);
threads_in_numa_.resize(numa_nodes_, 0);
// (openmp thread id -> numa node)
#pragma omp parallel
{
int tid = omp_get_thread_num();
thread_numa_mapping_[tid] = numa_node_of_cpu(sched_getcpu());
}
// (numa -> number of associated threads), and
// (omp_thread_id -> thread id in numa)
for (uint16_t n = 0; n < numa_nodes_; n++) {
uint64_t cnt = 0;
for (uint64_t t = 0; t < max_threads_; t++) {
int numa = thread_numa_mapping_[t];
if (n == numa) {
numa_thread_id_[t] = cnt;
cnt++;
}
}
threads_in_numa_[n] = cnt;
}
}
friend std::ostream& operator<<(std::ostream& str, const ThreadInfo& ti) {
str << "max_threads\t\t: " << ti.max_threads_
<< "\nnum_numa nodes\t\t: " << ti.numa_nodes_;
str << "\nthread to numa mapping\t: ";
for (auto& el : ti.thread_numa_mapping_) {
str << el << " ";
}
str << "\nthread id in numa node\t: ";
for (auto& el : ti.numa_thread_id_) {
str << el << " ";
}
str << "\nnum threads per numa\t: ";
for (auto& el : ti.threads_in_numa_) {
str << el << " ";
}
str << "\n";
return str;
}
private:
static std::atomic<uint64_t> thread_counter_;
/// Maximum number of threads for this simulation.
uint64_t max_threads_;
/// Number of NUMA nodes on this machine.
uint16_t numa_nodes_;
/// Contains the mapping thread id -> numa node \n
/// vector position = omp_thread_id \n
/// vector value = numa node
std::vector<int> thread_numa_mapping_;
/// Contains the mapping omp_thread_id -> numa thread id \n
/// each thread in a numa domain has a unique id in the range 0 to number \n
/// of threads in this numa domain
std::vector<int> numa_thread_id_;
/// Contains the mapping numa node -> total number of threads in this numa
/// node \n
/// vector position: numa node \n
/// vector value number of threads
std::vector<int> threads_in_numa_;
ThreadInfo() {
auto proc_bind = omp_get_proc_bind();
if (proc_bind != 1 && proc_bind != 4) {
// 4 corresponds to OMP_PROC_BIND=spread
// Due to some reason some OpenMP implementations set proc bind to spread
// even though OMP_PROC_BIND is set to true.
// A performance analysis showed almost identical results between true,
// and spread.
Log::Warning(
"ThreadInfo::ThreadInfo",
"The environment variable OMP_PROC_BIND must be set to "
"true prior to running BioDynaMo ('export OMP_PROC_BIND=true')");
}
Renew();
}
};
} // namespace bdm
#endif // CORE_UTIL_THREAD_INFO_H_
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
   Y is normalized in place so that RESULT->tv_usec ends up in [0, 1e6).
   Returns 1 if the difference is negative, otherwise 0.  */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into Y so its microseconds do not exceed X's.  */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Carry excess microseconds in the other direction.  */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec is certainly positive.  */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: order-1 3D 7-point variable-coefficient stencil,
   tiled (PLUTO/CLooG-generated loop nest).  Usage: prog Nx Ny Nz Nt.
   Fixes over the original: Nx/Ny/Nz/Nt were read uninitialized when too
   few arguments were given (UB); the init loops skipped plane/row/col 0,
   which the stencil reads via its -1 neighbors (uninitialized reads);
   tile_size and the top-level A/coef spines were leaked.  */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;

  /* All four sizes are required; bail out with a usage message instead
     of computing with indeterminate values.  */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the arrays: A is double-buffered (time % 2), coef holds the
  // 7 per-point stencil coefficients.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  // NOTE: loops start at 0 (the original started at 1) because the kernel
  // reads the -1 neighbors of interior points, i.e. plane/row/column 0;
  // starting at 1 left those cells uninitialized.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // Tiled, parallelized time-skewed stencil sweep (CLooG-generated).
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(16*t3+Nx+12,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),256*t4+254),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level spines and tile_size,
  // which the original leaked).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
8537.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Fill the data matrix with the deterministic pattern (i*j)/M and set the
   normalization constant float_n used later when averaging columns. */
static
void init_array (int m, int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int row, col;

  *float_n = 1.2;
  for (row = 0; row < M; row++) {
    for (col = 0; col < N; col++) {
      data[row][col] = ((DATA_TYPE) row * col) / M;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Dumps the m x m symmat matrix to stderr, inserting a newline every 20
   values (at linear index multiples of 20), then a trailing newline. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int k;

  /* Walk the matrix in row-major order with a single linear index;
     k/m and k%m recover the row and column. */
  for (k = 0; k < m * m; k++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[k / m][k % m]);
    if (k % 20 == 0)
      fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: compute column means, center the columns,
   then accumulate the m x m covariance matrix (upper triangle computed,
   mirrored into the lower triangle).  The whole function is timed by the
   caller, including the call and return. */
static
void kernel_covariance(int m, int n,
                       DATA_TYPE float_n,
                       DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                       DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                       DATA_TYPE POLYBENCH_1D(mean,M,m))
{
  int i, j, j1, j2;
#pragma scop
  /* Determine mean of column vectors of input data matrix */
  {
    /* NOTE(review): thread count and static chunk size are hard-coded to
       28 in all three pragmas below -- presumably tuned for a specific
       28-core host; confirm before running elsewhere. */
#pragma omp parallel for schedule(static, 28) num_threads(28)
    for (j = 0; j < _PB_M; j++)
      {
        mean[j] = 0.0;
        for (i = 0; i < _PB_N; i++)
          mean[j] += data[i][j];
        mean[j] /= float_n;
      }
    /* Center the column vectors. */
#pragma omp parallel for schedule(static, 28) num_threads(28)
    for (i = 0; i < _PB_N; i++)
      {
        for (j = 0; j < _PB_M; j++)
          {
            data[i][j] -= mean[j];
          }
      }
    /* Calculate the m * m covariance matrix. */
    /* NOTE(review): the triangular inner loop (j2 starts at j1) gives
       early j1 iterations more work, so a plain static schedule is
       load-imbalanced across threads. */
#pragma omp parallel for schedule(static, 28) num_threads(28)
    for (j1 = 0; j1 < _PB_M; j1++)
      {
        for (j2 = j1; j2 < _PB_M; j2++)
          {
            symmat[j1][j2] = 0.0;
            for (i = 0; i < _PB_N; i++)
              symmat[j1][j2] += data[i][j1] * data[i][j2];
            /* Mirror into the lower triangle to keep symmat symmetric. */
            symmat[j2][j1] = symmat[j1][j2];
          }
      }
  }
#pragma endscop
}
/* Driver: allocate and initialize the arrays, time the covariance kernel
   with the polybench instrumentation, and print the live-out matrix so
   dead-code elimination cannot remove the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;
  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_covariance (m, n, float_n,
                     POLYBENCH_ARRAY(data),
                     POLYBENCH_ARRAY(symmat),
                     POLYBENCH_ARRAY(mean));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  return 0;
}
|
cloud.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cloud.h"
#include "verlet.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
#define PI 3.1415926535897932384626433832795L
/* malloc wrapper: stores the allocation into *out and reports failures on
   stderr with the caller's file/function/line.  A zero-byte request is a
   success that yields a NULL pointer.  Returns 0 on success, 1 on failure. */
int safe_malloc (size_t count, void *out, const char * file, const char * fn, int line)
{
  void **slot = (void **) out;

  if (count == 0) {
    *slot = NULL;
    return 0;
  }
  *slot = malloc (count);
  if (*slot != NULL) {
    return 0;
  }
  fprintf (stderr, "%s, %s (%d): failed to malloc %zu bytes\n", file, fn, line, count);
  return 1;
}
#define safeMALLOC(count,out) safe_malloc (count, out, __FILE__, __func__, __LINE__)
#define CHK(Q) if (Q) return Q
/* Per-particle Hamiltonian for a unit central mass: kinetic energy
   |v|^2 / 2 minus potential energy 1/|r|, written into H[0..Np). */
static void
compute_hamiltonian (int Np, real_t *H,
                     const real_t *x, const real_t *y, const real_t *z,
                     const real_t *u, const real_t *v, const real_t *w)
{
  int i;

  for (i = 0; i < Np; i++) {
    real_t kinetic = 0.5 * (u[i]*u[i] + v[i]*v[i] + w[i]*w[i]);
    real_t radius  = sqrt (x[i]*x[i] + y[i]*y[i] + z[i]*z[i]);

    H[i] = kinetic - 1. / radius;
  }
}
/* Write one legacy-VTK POLYDATA snapshot named "<basename>_<k>.vtk":
   all point coordinates, then the Hamiltonian as a per-point scalar.
   Returns 0 on success, 1 if the output file cannot be opened. */
static int
write_step (int Np, int k, const char *basename, const real_t *x, const real_t *y, const real_t *z, const real_t *H)
{
  char outname[BUFSIZ];
  FILE *fp;
  int i;

  snprintf (outname, BUFSIZ-1, "%s_%d.vtk", basename, k);
  fp = fopen (outname, "w");
  if (fp == NULL) {
    fprintf (stderr, "unable to open %s for output\n", outname);
    return 1;
  }
  /* Fixed legacy-VTK header. */
  fputs ("# vtk DataFile Version 2.0\n"
         "Point cloud example\n"
         "ASCII\n"
         "DATASET POLYDATA\n", fp);
  fprintf (fp, "POINTS %d FLOAT\n", Np);
  for (i = 0; i < Np; i++) {
    fprintf (fp, "%f %f %f\n", (float) x[i], (float) y[i], (float) z[i]);
  }
  /* Attach the Hamiltonian as a per-point scalar field. */
  fprintf (fp, "\nPOINT_DATA %d\n", Np);
  fputs ("SCALARS Hamiltonian float\n"
         "LOOKUP_TABLE default\n", fp);
  for (i = 0; i < Np; i++) {
    fprintf (fp, "%f\n", (float) H[i]);
  }
  fclose (fp);
  return 0;
}
/* Driver: parse the command line, allocate SoA particle state, seed
   visually pleasing random Kepler orbits, run Nt velocity-Verlet steps in
   Nint-sized chunks across a statically partitioned OpenMP gang, and
   report throughput.  Optional VTK snapshots are written by the master
   thread between chunks. */
int
main (int argc, char **argv)
{
  int Np, Nt, err;
  int Nint;                        /* steps per chunk between outputs */
  real_t dt;
  real_t *x, *y, *z, *u, *v, *w;   /* positions and velocities (SoA) */
  real_t *Hin, *Hout;              /* Hamiltonian before/after, for error checks */
  const char *basename = NULL;     /* VTK output prefix; NULL disables output */
#if defined(_OPENMP)
  double time_start, time_end, time_diff;
#endif
  if (argc < 4 || argc > 6) {
    printf ("Usage: %s NUM_POINTS NUM_STEPS DT [NCHUNK OUTPUT_BASENAME]\n", argv[0]);
    return 1;
  }
  /* NOTE(review): atoi/atof report no errors; malformed arguments
     silently become 0. */
  Np = atoi (argv[1]);
  Nt = atoi (argv[2]);
  dt = (real_t) atof (argv[3]);
  if (argc >= 5) {
    Nint = atoi (argv[4]);
    if (argc == 6) {
      basename = argv[5];
    }
  }
  else {
    Nint = 1;
  }
  printf ("%s, NUM_POINTS=%d, NUM_STEPS=%d, DT=%g, NCHUNK=%d\n", argv[0], Np, Nt, dt, Nint);
  /* Allocate all state; CHK returns from main on allocation failure. */
  err = safeMALLOC (Np * sizeof (real_t), &x);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &y);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &z);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &u);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &v);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &w);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &Hin);CHK(err);
  err = safeMALLOC (Np * sizeof (real_t), &Hout);CHK(err);
  /* Parallel zero-fill: first-touch so pages land near the threads that
     will later own them under a static partition. */
#pragma omp parallel for schedule(static)
  for (int i = 0; i < Np; i++) {
    x[i] = 0.;
    y[i] = 0.;
    z[i] = 0.;
    u[i] = 0.;
    v[i] = 0.;
    w[i] = 0.;
  }
  /* make some orbits that are visually appealing (serial: rand() state) */
  for (int i = 0; i < Np; i++) {
    real_t e, p, theta, inc, Omega, omicr;
    real_t r, x_hat, y_hat, u_hat, v_hat;
    real_t sininc, cosinc, sinOmg, cosOmg, sinomc, cosomc;
    real_t T[3][2];
    /* create a random orbit (without too much eccentricity, and not too close
     * to the center, and at a random phase in its period) */
    e = 0.5 * ((real_t) rand() / (real_t) RAND_MAX);
    p = 0.5 + ((real_t) rand() / (real_t) RAND_MAX);
    theta = 2. * PI * ((real_t) rand() / (real_t) RAND_MAX);
    /* conic-section radius, then in-plane position and velocity */
    r = p / (1. + e * cos (theta));
    x_hat = r * cos (theta);
    y_hat = r * sin (theta);
    u_hat = - sqrt (1. / p) * sin (theta);
    v_hat = sqrt (1. / p) * (e + cos (theta));
    /* randomly orient the orient relative to the reference coordinates
     * https://en.wikipedia.org/wiki/Orbital_elements#Euler_angle_transformations */
    inc = 2. * PI * ((real_t) rand() / (real_t) RAND_MAX);
    Omega = 2. * PI * ((real_t) rand() / (real_t) RAND_MAX);
    omicr = 2. * PI * ((real_t) rand() / (real_t) RAND_MAX);
    sininc = sin (inc);
    cosinc = cos (inc);
    sinOmg = sin (Omega);
    cosOmg = cos (Omega);
    sinomc = sin (omicr);
    cosomc = cos (omicr);
    /* Euler-angle rotation applied to the 2-D orbital-plane vectors */
    T[0][0] = cosOmg * cosomc - sinOmg * sinomc * cosinc;
    T[0][1] = sinOmg * cosomc + cosOmg * sinomc * cosinc;
    T[1][0] = - cosOmg * sinomc - sinOmg * cosomc * cosinc;
    T[1][1] = - sinOmg * sinomc + cosOmg * cosomc * cosinc;
    T[2][0] = cosOmg * sininc;
    T[2][1] = - sinOmg * sininc;
    x[i] = T[0][0] * x_hat + T[0][1] * y_hat;
    y[i] = T[1][0] * x_hat + T[1][1] * y_hat;
    z[i] = T[2][0] * x_hat + T[2][1] * y_hat;
    u[i] = T[0][0] * u_hat + T[0][1] * v_hat;
    v[i] = T[1][0] * u_hat + T[1][1] * v_hat;
    w[i] = T[2][0] * u_hat + T[2][1] * v_hat;
    /* Redundant with compute_hamiltonian() below, kept as in original. */
    Hin[i] = Hout[i] = 0.5 * (u[i]*u[i] + v[i]*v[i] + w[i]*w[i]) /* kinetic energy */
      - 1. / sqrt (x[i]*x[i] + y[i]*y[i] + z[i]*z[i]); /* potential energy */
  }
  compute_hamiltonian (Np, Hin, x, y, z, u, v, w);
  memcpy (Hout, Hin, Np * sizeof (real_t));
#if defined (_OPENMP)
  time_start = omp_get_wtime();
#endif
#pragma omp parallel
  {
    int num_threads, my_thread;
    int my_start, my_end;
    int my_N;
#if defined(_OPENMP)
    my_thread = omp_get_thread_num();
    num_threads = omp_get_num_threads();
#else
    my_thread = 0;
    num_threads = 1;
#endif
    /* get thread intervals: [my_start, my_end) is this thread's
       contiguous slice of the particle arrays */
    my_start = ((size_t) my_thread * (size_t) Np) / (size_t) num_threads;
    my_end = ((size_t) (my_thread + 1) * (size_t) Np) / (size_t) num_threads;
    my_N = my_end - my_start;
#if 0
#pragma omp critical
    {
      printf ("[%d/%d]: [%d, %d)\n", my_thread, num_threads, my_start, my_end);
    }
#endif
    for (int s = 0; s < Nt; s += Nint) {
      /* NOTE(review): "omp master" has no implied barrier, so the master
         writes x/y/z while other threads may already be advancing their
         slices in verlet_step below -- a data race on the snapshot
         contents.  Presumably tolerated for visualization; confirm. */
#pragma omp master
      if (basename) {write_step (Np, s / Nint, basename, x, y, z, Hout);}
      /* execute the loop: advance this thread's slice Nint steps */
      verlet_step (my_N, Nint, dt, &x[my_start], &y[my_start], &z[my_start],
                   &u[my_start], &v[my_start], &w[my_start]);
#if 0
      compute_hamiltonian (Np, Hout, x, y, z, u, v, w);
#endif
    }
  }
#if defined (_OPENMP)
  time_end = omp_get_wtime();
  time_diff = time_end - time_start;
  printf ("[%s]: %e elapsed seconds\n", argv[0], time_diff);
  printf ("[%s]: %e particle time steps per second\n", argv[0], (size_t) Np * (size_t) Nt / time_diff);
  printf ("[%s]: %e particle time step chunks per second\n", argv[0], (size_t) Np * (size_t) Nt / (Nint * time_diff));
#endif
#if 0
  printf ("Hamiltonian relative error:\n");
  for (int i = 0; i < Np; i++) {
    printf ("[%d]: %g\n", i, fabs (Hout[i] - Hin[i]) / fabs (Hin[i]));
  }
#endif
  /* Free in reverse order of allocation. */
  free (Hout);
  free (Hin);
  free (w);
  free (v);
  free (u);
  free (z);
  free (y);
  free (x);
  return 0;
}
|
geopm_sched.c | /*
* Copyright (c) 2015, 2016, 2017, 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifdef __APPLE__
#define _DARWIN_C_SOURCE
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include "geopm_sched.h"
#include "geopm_error.h"
#include "config.h"
#ifdef _OPENMP
#include <omp.h>
#endif
static volatile unsigned g_is_popen_complete = 0;
static struct sigaction g_popen_complete_signal_action;
/* SIGCHLD handler used by geopm_sched_popen(): raises the completion
   flag when the popen'd child exits; ignores any other signal number. */
static void geopm_sched_popen_complete(int signum)
{
    if (signum != SIGCHLD) {
        return;
    }
    g_is_popen_complete = 1;
}
/* Run cmd via popen("r") and wait until the child has exited before
   returning the stream in *fid.  A temporary SIGCHLD handler sets
   g_is_popen_complete; the previous disposition is restored afterwards.
   Returns 0 on success or an errno/GEOPM error code. */
int geopm_sched_popen(const char *cmd, FILE **fid)
{
    int err = 0;
    *fid = NULL;
    struct sigaction save_action;
    /* Install the completion handler, saving the old SIGCHLD action. */
    g_popen_complete_signal_action.sa_handler = geopm_sched_popen_complete;
    sigemptyset(&g_popen_complete_signal_action.sa_mask);
    g_popen_complete_signal_action.sa_flags = 0;
    err = sigaction(SIGCHLD, &g_popen_complete_signal_action, &save_action);
    if (!err) {
        *fid = popen(cmd, "r");
        /* NOTE(review): busy-wait spin until the handler fires -- burns a
           CPU for the child's lifetime; relies on g_is_popen_complete
           being volatile for the update to be observed. */
        while (*fid && !g_is_popen_complete) {

        }
        g_is_popen_complete = 0;
        sigaction(SIGCHLD, &save_action, NULL);
    }
    if (!err && *fid == NULL) {
        /* popen failed; fall back to a generic error if errno is unset. */
        err = errno ? errno : GEOPM_ERROR_RUNTIME;
    }
    return err;
}
#ifndef __APPLE__
/* Number of CPUs configured on the system.  _SC_NPROCESSORS_CONF counts
   configured processors, including ones currently offline. */
int geopm_sched_num_cpu(void)
{
    long num_cpu = sysconf(_SC_NPROCESSORS_CONF);
    return (int)num_cpu;
}
/* CPU on which the calling thread is currently executing, as reported by
   the kernel (GNU extension sched_getcpu; -1 with errno on failure). */
int geopm_sched_get_cpu(void)
{
    int cpu_index = sched_getcpu();
    return cpu_index;
}
static pthread_once_t g_proc_cpuset_once = PTHREAD_ONCE_INIT;
static cpu_set_t *g_proc_cpuset = NULL;
static size_t g_proc_cpuset_size = 0;
/* If /proc/self/status is usable and correct then parse this file to
determine the process affinity. */
#ifdef GEOPM_PROCFS
/* Parse the "Cpus_allowed:" hexadecimal mask from an open
   /proc/<pid>/status stream into proc_cpuset, one 32-bit word per
   comma-separated field, least significant word last on the line.
   num_cpu bounds the number of words read; fid must be positioned at the
   start of the file.  Returns 0 on success or a GEOPM error code when
   the key is missing or the mask is malformed. */
int geopm_sched_proc_cpuset_helper(int num_cpu, uint32_t *proc_cpuset, FILE *fid)
{
    const char *key = "Cpus_allowed:";
    const size_t key_len = strlen(key);
    /* Number of 32-bit words needed to hold num_cpu bits. */
    const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0);
    int err = 0;
    char *line = NULL;
    size_t line_len = 0;
    int read_idx = 0;
    while ((getline(&line, &line_len, fid)) != -1) {
        if (strncmp(line, key, key_len) == 0) {
            char *line_ptr = line + key_len;
            /* On some systems we have seen the mask padded with zeros
               beyond the number of online CPUs.  Deal with this by
               skipping extra leading 32 bit masks. */
            int num_comma = 0;
            char *comma_ptr = line_ptr;
            while ((comma_ptr = strchr(comma_ptr, ','))) {
                ++comma_ptr;
                ++num_comma;
            }
            if (num_comma > num_read - 1) {
                num_comma -= num_read - 1;
                for (int i = 0; !err && i < num_comma; ++i) {
                    line_ptr = strchr(line_ptr, ',');
                    if (!line_ptr) {
                        err = GEOPM_ERROR_LOGIC;
                    }
                    else {
                        ++line_ptr;
                    }
                }
            }
            /* Read the remaining fields most-significant-word first. */
            for (read_idx = num_read - 1; !err && read_idx >= 0; --read_idx) {
                int num_match = sscanf(line_ptr, "%x", proc_cpuset + read_idx);
                if (num_match != 1) {
                    err = GEOPM_ERROR_RUNTIME;
                }
                else {
                    line_ptr = strchr(line_ptr, ',');
                    if (read_idx != 0 && line_ptr == NULL) {
                        /* Ran out of fields before filling every word. */
                        err = GEOPM_ERROR_RUNTIME;
                    }
                    else if (line_ptr != NULL) {
                        /* BUGFIX: advance past the comma only when one was
                           found.  The previous code incremented line_ptr
                           unconditionally here, performing ++ on a NULL
                           pointer for the final (read_idx == 0) field --
                           undefined behavior. */
                        ++line_ptr;
                    }
                }
            }
        }
    }
    if (line) {
        free(line);
    }
    /* read_idx reaches -1 only if a full mask was parsed without error. */
    if (read_idx != -1) {
        err = GEOPM_ERROR_RUNTIME;
    }
    return err;
}
/* pthread_once initializer: populate the global g_proc_cpuset with the
   process affinity parsed from /proc/self/status.  On any failure the
   mask falls back to "all CPUs set" so callers still get a usable mask. */
static void geopm_proc_cpuset_once(void)
{
    const char *status_path = "/proc/self/status";
    const int num_cpu = geopm_sched_num_cpu();
    /* 32-bit words needed to hold num_cpu bits. */
    const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0);
    int err = 0;
    uint32_t *proc_cpuset = NULL;
    FILE *fid = NULL;
    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        /* Temporary word buffer that the /proc parser fills. */
        proc_cpuset = calloc(num_read, sizeof(*proc_cpuset));
        if (proc_cpuset == NULL) {
            err = ENOMEM;
        }
    }
    if (!err) {
        fid = fopen(status_path, "r");
        if (!fid) {
            err = errno ? errno : GEOPM_ERROR_RUNTIME;
        }
    }
    if (!err) {
        err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid);
        fclose(fid);
    }
    if (!err) {
        /* NOTE(review): copies g_proc_cpuset_size bytes out of a buffer of
           num_read * 4 bytes; CPU_ALLOC_SIZE rounds up to unsigned-long
           granularity, so this looks like it can over-read proc_cpuset by
           a few bytes -- confirm the intended sizing. */
        memcpy(g_proc_cpuset, proc_cpuset, g_proc_cpuset_size);
    }
    else if (g_proc_cpuset) {
        /* Fallback: claim every CPU rather than leave the mask undefined. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    if (proc_cpuset) {
        free(proc_cpuset);
    }
}
/* If /proc/self/status is not available spawn a pthread requesting an
open affinity mask and then have the thread query the affinity mask
enforced by the OS using sched_getaffinity(). */
#else /* GEOPM_PROCFS */
/* Thread body: store the OS-enforced affinity of this helper thread into
   the global g_proc_cpuset.  Returns NULL on success, or an error code
   smuggled through the pointer return value on failure. */
static void *geopm_proc_cpuset_pthread(void *arg)
{
    int query_err = sched_getaffinity(0, g_proc_cpuset_size, g_proc_cpuset);
    if (!query_err) {
        return NULL;
    }
    return (void *)(size_t)(errno ? errno : GEOPM_ERROR_RUNTIME);
}
/* pthread_once initializer (no-procfs variant): spawn a helper pthread
   whose requested affinity is fully open, and let it record the mask the
   OS actually enforces via sched_getaffinity().  On failure the mask is
   left with all bits set. */
static void geopm_proc_cpuset_once(void)
{
    int err = 0;
    int num_cpu = geopm_sched_num_cpu();
    pthread_t tid;
    pthread_attr_t attr;
    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        /* Pre-fill with every CPU: both the fallback result and the
           affinity requested for the helper thread. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
        err = pthread_attr_init(&attr);
    }
    if (!err) {
        err = pthread_attr_setaffinity_np(&attr, g_proc_cpuset_size, g_proc_cpuset);
    }
    if (!err) {
        err = pthread_create(&tid, &attr, geopm_proc_cpuset_pthread, NULL);
    }
    if (!err) {
        void *result = NULL;
        err = pthread_join(tid, &result);
        if (!err && result) {
            /* Helper thread reported an error through its return value. */
            err = (int)(size_t)result;
        }
    }
    if (err && err != ENOMEM) {
        /* Re-fill the mask; g_proc_cpuset is valid on every non-ENOMEM path. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    if (!err) {
        /* NOTE(review): attr is only destroyed on the success path; an
           error after pthread_attr_init() leaks the attribute object. */
        err = pthread_attr_destroy(&attr);
    }
}
#endif /* GEOPM_PROCFS */
/* Copy the lazily-initialized process affinity mask into proc_cpuset,
   clearing any bits at or above the configured CPU count.  Returns 0 on
   success, GEOPM_ERROR_INVALID if the destination is too small, or the
   pthread_once error. */
int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset)
{
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t dest_size = CPU_ALLOC_SIZE(num_cpu);
    if (err) {
        return err;
    }
    if (dest_size < g_proc_cpuset_size) {
        return GEOPM_ERROR_INVALID;
    }
    memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size);
    /* Mask off CPUs beyond what the system actually has configured. */
    for (int cpu_idx = sched_num_cpu; cpu_idx < num_cpu; ++cpu_idx) {
        CPU_CLR_S(cpu_idx, dest_size, proc_cpuset);
    }
    return 0;
}
int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp)
{
    /*! @brief Function that returns a cpuset that has bits set for
               all CPUs enabled for the process which are not used by
               OpenMP.  Rather than returning an empty mask, if all
               CPUs allocated for the process are used by OpenMP, then
               the woomp mask will have all bits set. */
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu);
    if (!err && !g_proc_cpuset) {
        err = ENOMEM;
    }
    if (!err && req_alloc_size < g_proc_cpuset_size) {
        /* Caller's cpuset cannot hold the process mask. */
        err = EINVAL;
    }
    if (!err) {
        /* Copy the process CPU mask into the output. */
        memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size);
        /* Start an OpenMP parallel region and have each thread clear
           its bit from the mask. */
#ifdef _OPENMP
#pragma omp parallel default(shared)
        {
#pragma omp critical
            {
                /* NOTE(review): each thread samples its CPU inside the
                   critical section; if threads migrate between samples
                   some OpenMP CPUs may remain set -- best effort only. */
                int cpu_index = sched_getcpu();
                if (cpu_index != -1 && cpu_index < num_cpu) {
                    /* Clear the bit for this OpenMP thread's CPU. */
                    CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp);
                }
                else {
                    /* err is shared but only written under the critical
                       section, so there is no data race here. */
                    err = errno ? errno : GEOPM_ERROR_LOGIC;
                }
            } /* end pragma omp critical */
        } /* end pragma omp parallel */
#endif /* _OPENMP */
    }
    if (!err) {
        /* Clear bits beyond the configured CPU count. */
        for (int i = sched_num_cpu; i < num_cpu; ++i) {
            CPU_CLR_S(i, req_alloc_size, woomp);
        }
    }
    if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) {
        /* If all CPUs are used by the OpenMP gang, then leave the
           mask open and allow the Linux scheduler to choose. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, woomp);
        }
    }
    return err;
}
#else /* __APPLE__ */
void __cpuid(uint32_t*, int);
/* macOS variant: query the CPU count with sysctl(CTL_HW, HW_NCPU).
   NOTE(review): the sysctl return value is ignored; on failure the
   default of 1 is silently returned. */
int geopm_sched_num_cpu(void)
{
    uint32_t result = 1;
    size_t len = sizeof(result);
    sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &result, &len, NULL, 0);
    return result;
}
/* macOS variant: derive the current CPU from CPUID leaf 1.  Reads the
   initial APIC ID from the top byte of cpu_info[1] (presumably EBX --
   depends on the external __cpuid()'s register ordering; confirm).
   Returns -1 when the APIC feature bit (cpu_info[3] bit 9) is absent. */
int geopm_sched_get_cpu(void)
{
    int result = -1;
    uint32_t cpu_info[4];
    __cpuid(cpu_info, 1);
    // Check APIC
    if (cpu_info[3] & (1 << 9)) {
        result = (int)(cpu_info[1] >> 24);
    }
    return result;
}
// On Mac OS just fill in all bits for the cpuset for both the process
// mask and woomp to get the tests passing.
/* Set the bit for every CPU index in [0, num_cpu) in proc_cpuset. */
static void geopm_cpuset_fill(int num_cpu, cpu_set_t *proc_cpuset)
{
    size_t mask_size = CPU_ALLOC_SIZE(num_cpu);
    int cpu_idx;
    for (cpu_idx = 0; cpu_idx < num_cpu; ++cpu_idx) {
        CPU_SET_S(cpu_idx, mask_size, proc_cpuset);
    }
}
/* macOS stub: report every CPU as available to the process; always 0. */
int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset)
{
    geopm_cpuset_fill(num_cpu, proc_cpuset);
    return 0;
}
/* macOS stub: report every CPU as free of OpenMP threads; always 0. */
int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp)
{
    geopm_cpuset_fill(num_cpu, woomp);
    return 0;
}
#endif /* __APPLE__ */
|
GB_unop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fp64)
// op(A') function: GB (_unop_tran__identity_uint8_fp64)
// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator entrywise,
// casting double inputs to uint8_t.  Auto-generated; the GB_* macros and
// GB_cast_to_uint8_t come from the file header and GB.h.
GrB_Info GB (_unop_apply__identity_uint8_fp64)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries absent from the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity unary
// operator.  The entire loop body is supplied by the shared template
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace transpose buffers
    const int64_t *restrict A_slice, // partition of A across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology.  Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial n! for the Binomial kernel.  fact(0) == 1.
   Overflows size_t for n > 20 on 64-bit platforms (callers use small n). */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  size_t k;

  for (k = 2; k <= n; k++)
    result *= k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Quick function to find the last kernel in a kernel list by following
   the ->next chain until it ends. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  for ( ; kernel->next != (KernelInfo *) NULL; kernel = kernel->next)
    { }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo method
% when you are finished with it. Do not free this memory yourself.
%
% Input kernel defintion strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists thorugh rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetNextToken(p,&p,MaxTextExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling a incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
/*
** ParseKernelName() parses a single 'named' builtin kernel specification,
** e.g. "Gaussian:0x2", returning a newly allocated KernelInfo list, or NULL
** if the leading token is not a recognised kernel name.
**
** The geometry arguments following the name are copied into a local buffer
** for ParseGeometry(); missing arguments are filled in with per-kernel
** defaults before calling AcquireKernelBuiltIn().
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];
  const char
    *p,
    *end;
  GeometryInfo
    args;
  KernelInfo
    *kernel;
  MagickStatusType
    flags;
  size_t
    length;
  ssize_t
    type;
  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MaxTextExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;
  end = strchr(p, ';'); /* end of this kernel definition */
  if ( end == (const char *) NULL )
    end = strchr(p, '\0');
  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* Clamp the copy to the fixed-size token buffer: 'end-p' is derived from
  ** unbounded user input and previously overflowed token[] (stack smash)
  ** when a kernel definition exceeded MaxTextExtent characters. */
  length=(size_t) (end-p);
  if (length >= MaxTextExtent)
    length=MaxTextExtent-1;
  (void) memcpy(token, p, length);
  token[length] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       flags, args.rho, args.sigma, args.xi, args.psi );
#endif
  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
         args.rho = 3;                 /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }
  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);
  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/*
** AcquireKernelInfo() parses a user-supplied kernel string into a linked
** list of KernelInfo structures.  Each ';'-separated entry is either a
** 'named' builtin kernel (token starts with a letter) or a user-defined
** kernel array.  A leading '@' means the rest of the string is a filename
** whose contents hold the kernel definition(s).
**
** Returns the head of the kernel list, or NULL on any parse failure (in
** which case all partially built kernels are destroyed).  The caller owns
** the returned list and must free it with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;
  char
    *kernel_cache,
    token[MaxTextExtent];
  const char
    *p;
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' : slurp the kernel definition(s) from a file */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            /* fix: release the file buffer too -- previously leaked when a
            ** '@file' kernel definition failed to parse */
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
          /* Unfortunately we can not yet rotate a non-square kernel */
          /* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
        ** Corners of the traditional method made more forgiving,
        ** but they retain the same cyclic order.
        */
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
        ** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
    ** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  /* Deep-clone a kernel list: copies the KernelInfo structure, its value
  ** array, and (recursively) every kernel linked through 'next'.  Returns
  ** NULL on allocation failure, after releasing any partial clone.
  **
  ** The caller owns the returned list and must free it with
  ** DestroyKernelInfo().
  */
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*new_kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* The structure copy above aliased the original's value array and 'next'
  ** chain.  Clear both pointers immediately so that a failure below cannot
  ** make DestroyKernelInfo(new_kernel) free memory still owned by 'kernel'
  ** (previously an allocation failure here would recursively destroy the
  ** original's 'next' list - a double free for the caller). */
  new_kernel->values=(double *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /* Free an entire kernel list: each node's aligned value array and then
  ** the node itself.  Walks the 'next' chain iteratively and always
  ** returns NULL, so callers may write "kernel=DestroyKernelInfo(kernel);".
  */
  KernelInfo
    *node,
    *next;

  assert(kernel != (KernelInfo *) NULL);
  for (node=kernel; node != (KernelInfo *) NULL; node=next)
  {
    next=node->next;  /* save before the node is released */
    node->values=(double *) RelinquishAlignedMemory(node->values);
    (void) RelinquishMagickMemory(node);
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* NOTE(review): dead code - compiled out by the surrounding "#if 0".
** It references an undeclared variable 'angle' (see below), so it would
** not compile if enabled as-is; presumably a leftover from an earlier
** refactor where the rotation angle was tracked here - TODO confirm
** before ever enabling this block. */
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;
      /* swap values[x] and values[width-1-x] within each row */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x], k[x]=k[r], k[r]=t;
      /* mirror the origin column as well */
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);  /* 'angle' is not declared anywhere in scope */
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a kernel into its mirror sequence by appending three cumulative
  ** rotations of the current list tail: 180 degrees (flip), then 90 degrees
  ** (transpose), then 180 degrees again (flop).  Each clone is taken from
  ** the previously appended kernel, so the rotations accumulate.  If any
  ** clone fails to allocate, expansion simply stops early, leaving the
  ** list valid but shorter. */
  static const double
    rotations[3] = { 180.0, 90.0, 180.0 };

  KernelInfo
    *mirror,
    *tail;

  size_t
    i;

  tail = kernel;
  for (i=0; i < 3; i++)
  {
    mirror = CloneKernelInfo(tail);
    if (mirror == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(mirror, rotations[i]);
    LastKernelInfo(tail)->next = mirror;
    tail = mirror;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  /* Two kernels are "the same" when their geometry, origin, and every
  ** value agree (within MagickEpsilon).  A NaN entry only matches another
  ** NaN entry in the same position. */
  register size_t
    i;

  /* mismatched size or origin means different kernels */
  if ( (kernel1->width != kernel2->width) ||
       (kernel1->height != kernel2->height) ||
       (kernel1->x != kernel2->x) ||
       (kernel1->y != kernel2->y) )
    return MagickFalse;
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    const double
      value1 = kernel1->values[i],
      value2 = kernel2->values[i];

    /* a NaN on one side only is a mismatch */
    if ( (IsNaN(value1) ? 1 : 0) != (IsNaN(value2) ? 1 : 0) )
      return MagickFalse;
    /* matched NaNs fall through here: NaN comparisons are never >= epsilon */
    if ( fabs(value1 - value2) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /* Append successively rotated clones of the kernel list until the
  ** rotation wraps around and repeats the original kernel. */
  tail=kernel;
  DisableMSCWarning(4127)
  for ( ; ; ) {
  RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;  /* allocation failed - stop expanding */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* rotation has come full circle */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  /* the last clone duplicates the first kernel (or rotation failed to
  ** differ) and was never linked into the list - discard it */
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  /* Reset the ranges; zero is always considered part of the kernel */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    double
      value;

    /* snap values within epsilon of zero to exactly zero */
    if ( fabs(kernel->values[i]) < MagickEpsilon )
      kernel->values[i] = 0.0;
    value = kernel->values[i];
    /* accumulate the separate negative and positive value ranges */
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
/* Apply one morphology primitive pass of 'method' with 'kernel' over 'image',
** writing into the pre-created 'result_image'.  Returns the number of pixels
** that changed, or -1 on failure (used for convergence determination by the
** iterating caller).  No image is created here.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *p_view,
    *q_view;

  register ssize_t
    i;

  size_t
    *changes,          /* per-thread change counters, summed at the end */
    changed,
    virt_width;

  ssize_t
    y,
    offx,
    offy;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(result_image != (Image *) NULL);
  assert(result_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  p_view=AcquireVirtualCacheView(image,exception);
  q_view=AcquireAuthenticCacheView(result_image,exception);
  virt_width=image->columns+kernel->width-1;
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop though kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
      /* kernel needs to used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
      /* kernel is used as is, without reflection */
      break;
    default:
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
  }
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changes[i]=0;

  if ( method == ConvolveMorphology && kernel->width == 1 )
  { /* Special handling (for speed) of vertical (blur) kernels.
    ** This performs its handling in columns rather than in rows.
    ** This is only done for convolve as it is the only method that
    ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
    **
    ** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blue with normal row-by-row (below)
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.807u
    ** Using this column method
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.620u
    **
    ** Anthony Thyssen, 14 June 2010
    */
    register ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,result_image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      register const PixelPacket
        *magick_restrict p;

      register const IndexPacket
        *magick_restrict p_indexes;

      register PixelPacket
        *magick_restrict q;

      register IndexPacket
        *magick_restrict q_indexes;

      register ssize_t
        y;

      ssize_t
        r;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
        exception);
      q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      p_indexes=GetCacheViewVirtualIndexQueue(p_view);
      q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p'. while 'q' points to it directly */
      r = offy;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        DoublePixelPacket
          result;

        register ssize_t
          v;

        register const double
          *magick_restrict k;

        register const PixelPacket
          *magick_restrict k_pixels;

        register const IndexPacket
          *magick_restrict k_indexes;

        /* Copy input image to the output image for unused channels
         * This removes need for 'cloning' a new image every iteration
         */
        *q = p[r];
        if (image->colorspace == CMYKColorspace)
          SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));
        /* Set the bias of the weighted average output */
        result.red     =
        result.green   =
        result.blue    =
        result.opacity =
        result.index   = bias;
        /* Weighted Average of pixels using reflected kernel
        **
        ** NOTE for correct working of this operation for asymetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is its values needs to be reversed.
        */
        k = &kernel->values[ kernel->height-1 ];
        k_pixels = p;
        k_indexes = p_indexes+y;
        if ( ((channel & SyncChannels) == 0 ) ||
             (image->matte == MagickFalse) )
          { /* No 'Sync' involved.
            ** Convolution is simple greyscale channel operation
            */
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              result.red     += (*k)*GetPixelRed(k_pixels);
              result.green   += (*k)*GetPixelGreen(k_pixels);
              result.blue    += (*k)*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += (*k)*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            if ((channel & RedChannel) != 0)
              SetPixelRed(q,ClampToQuantum(result.red));
            if ((channel & GreenChannel) != 0)
              SetPixelGreen(q,ClampToQuantum(result.green));
            if ((channel & BlueChannel) != 0)
              SetPixelBlue(q,ClampToQuantum(result.blue));
            if (((channel & OpacityChannel) != 0) &&
                (image->matte != MagickFalse))
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (((channel & IndexChannel) != 0) &&
                (image->colorspace == CMYKColorspace))
              SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
          }
        else
          { /* Channel 'Sync' Flag, and Alpha Channel enabled.
            ** Weight the color channels with Alpha Channel so that
            ** transparent pixels are not part of the results.
            */
            double
              gamma;  /* divisor, sum of color alpha weighting */

            MagickRealType
              alpha;  /* alpha weighting for colors : alpha  */

            size_t
              count;  /* alpha valus collected, number kernel values */

            count=0;
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
              count++;           /* number of alpha values collected */
              alpha*=(*k);       /* include kernel weighting now */
              gamma += alpha;    /* normalize alpha weights only */
              result.red     += alpha*GetPixelRed(k_pixels);
              result.green   += alpha*GetPixelGreen(k_pixels);
              result.blue    += alpha*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += alpha*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            /* Sync'ed channels, all channels are modified */
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelRed(q,ClampToQuantum(gamma*result.red));
            SetPixelGreen(q,ClampToQuantum(gamma*result.green));
            SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
            SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
          }
        /* Count up changed pixels */
        if (   ( p[r].red != GetPixelRed(q))
            || ( p[r].green != GetPixelGreen(q))
            || ( p[r].blue != GetPixelBlue(q))
            || ( (image->matte != MagickFalse) &&
                 (p[r].opacity != GetPixelOpacity(q)))
            || ( (image->colorspace == CMYKColorspace) &&
                 (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
          changes[id]++;
        p++;
        q++;
      } /* y */
      if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
          /* NOTE(review): this loop iterates over columns but the progress
          ** denominator is image->rows, as in the row path -- confirm
          ** against upstream before changing. */
          proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    } /* x */
    result_image->type=image->type;
    q_view=DestroyCacheView(q_view);
    p_view=DestroyCacheView(p_view);
    for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
      changed+=changes[i];
    changes=(size_t *) RelinquishMagickMemory(changes);
    /* FIX: return -1 on failure, consistent with the general (row-by-row)
    ** path below; previously this path returned 0 which the caller could
    ** mistake for "no pixels changed" (i.e. convergence). */
    return(status ? (ssize_t) changed : -1);
  }

  /*
  ** Normal handling of horizontal or rectangular kernels (row by row)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,result_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    size_t
      r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
      kernel->height, exception);
    q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    p_indexes=GetCacheViewVirtualIndexQueue(p_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
    /* offset to origin in 'p'. while 'q' points to it directly */
    r = virt_width*offy + offx;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      DoublePixelPacket
        result,
        min,
        max;

      /* Copy input image to the output image for unused channels
       * This removes need for 'cloning' a new image every iteration
       */
      *q = p[r];
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));
      /* Defaults */
      min.red     =
      min.green   =
      min.blue    =
      min.opacity =
      min.index   = (double) QuantumRange;
      max.red     =
      max.green   =
      max.blue    =
      max.opacity =
      max.index   = 0.0;
      /* default result is the original pixel value */
      result.red     = (double) p[r].red;
      result.green   = (double) p[r].green;
      result.blue    = (double) p[r].blue;
      result.opacity = QuantumRange - (double) p[r].opacity;
      result.index   = 0.0;
      if ( image->colorspace == CMYKColorspace)
        result.index = (double) GetPixelIndex(p_indexes+x+r);

      switch (method) {
        case ConvolveMorphology:
          /* Set the bias of the weighted average output */
          result.red     =
          result.green   =
          result.blue    =
          result.opacity =
          result.index   = bias;
          break;
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          /* use a boolean flag indicating when first match found */
          result.red = 0.0;  /* result is not used otherwise */
          break;
        default:
          break;
      }

      switch ( method ) {
        case ConvolveMorphology:
            /* Weighted Average of pixels using reflected kernel
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** Correlation is actually the same as this but without reflecting
            ** the kernel, and thus 'lower-level' that Convolution.  However
            ** as Convolution is the more common method used, and it does not
            ** really cost us much in terms of processing to use a reflected
            ** kernel, so it is Convolution that is implemented.
            **
            ** Correlation will have its kernel reflected before calling
            ** this function to do a Convolve.
            **
            ** For more details of Correlation vs Convolution see
            **   http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            if ( ((channel & SyncChannels) == 0 ) ||
                 (image->matte == MagickFalse) )
              { /* No 'Sync' involved.
                ** Convolution is simple greyscale channel operation
                */
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    result.red     += (*k)*k_pixels[u].red;
                    result.green   += (*k)*k_pixels[u].green;
                    result.blue    += (*k)*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index += (*k)*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                if ((channel & RedChannel) != 0)
                  SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
                if ((channel & GreenChannel) != 0)
                  SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
                if ((channel & BlueChannel) != 0)
                  SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
                if (((channel & OpacityChannel) != 0) &&
                    (image->matte != MagickFalse))
                  SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
                if (((channel & IndexChannel) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
              }
            else
              { /* Channel 'Sync' Flag, and Alpha Channel enabled.
                ** Weight the color channels with Alpha Channel so that
                ** transparent pixels are not part of the results.
                */
                double
                  alpha,  /* alpha weighting for colors : alpha  */
                  gamma;  /* divisor, sum of color alpha weighting */

                size_t
                  count;  /* alpha valus collected, number kernel values */

                count=0;
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
                    count++;           /* number of alpha values collected */
                    alpha*=(*k);       /* include kernel weighting now */
                    gamma += alpha;    /* normalize alpha weights only */
                    result.red     += alpha*k_pixels[u].red;
                    result.green   += alpha*k_pixels[u].green;
                    result.blue    += alpha*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index+=alpha*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                /* Sync'ed channels, all channels are modified */
                gamma=PerceptibleReciprocal(gamma);
                if (count != 0)
                  gamma*=(double) kernel->height*kernel->width/count;
                SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
                SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
                SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
                SetPixelOpacity(q,ClampToQuantum(result.opacity));
                if (image->colorspace == CMYKColorspace)
                  SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
                    result.index)));
              }
            break;

        case ErodeMorphology:
            /* Minimum Value within kernel neighbourhood
            **
            ** NOTE that the kernel is not reflected for this operation!
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Minimize(min.red,     (double) k_pixels[u].red);
                Minimize(min.green,   (double) k_pixels[u].green);
                Minimize(min.blue,    (double) k_pixels[u].blue);
                Minimize(min.opacity,
                            QuantumRange-(double) k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateMorphology:
            /* Maximum Value within kernel neighbourhood
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            **
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Maximize(max.red,     (double) k_pixels[u].red);
                Maximize(max.green,   (double) k_pixels[u].green);
                Maximize(max.blue,    (double) k_pixels[u].blue);
                Maximize(max.opacity,
                            QuantumRange-(double) k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Maximize(max.index,   (double) GetPixelIndex(
                    k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case HitAndMissMorphology:
        case ThinningMorphology:
        case ThickenMorphology:
            /* Minimum of Foreground Pixel minus Maxumum of Background Pixels
            **
            ** NOTE that the kernel is not reflected for this operation,
            ** and consists of both foreground and background pixel
            ** neighbourhoods, 0.0 for background, and 1.0 for foreground
            ** with either Nan or 0.5 values for don't care.
            **
            ** Note that this will never produce a meaningless negative
            ** result.  Such results can cause Thinning/Thicken to not work
            ** correctly when used against a greyscale image.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) ) continue;
                if ( (*k) > 0.7 )
                { /* minimim of foreground pixels */
                  Minimize(min.red,     (double) k_pixels[u].red);
                  Minimize(min.green,   (double) k_pixels[u].green);
                  Minimize(min.blue,    (double) k_pixels[u].blue);
                  Minimize(min.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Minimize(min.index,(double) GetPixelIndex(
                      k_indexes+u));
                }
                else if ( (*k) < 0.3 )
                { /* maximum of background pixels */
                  Maximize(max.red,     (double) k_pixels[u].red);
                  Maximize(max.green,   (double) k_pixels[u].green);
                  Maximize(max.blue,    (double) k_pixels[u].blue);
                  Maximize(max.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Maximize(max.index,   (double) GetPixelIndex(
                      k_indexes+u));
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* Pattern Match if difference is positive */
            min.red     -= max.red;     Maximize( min.red,     0.0 );
            min.green   -= max.green;   Maximize( min.green,   0.0 );
            min.blue    -= max.blue;    Maximize( min.blue,    0.0 );
            min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
            min.index   -= max.index;   Maximize( min.index,   0.0 );
            break;

        case ErodeIntensityMorphology:
            /* Select Pixel with Minimum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity.
            **
            ** NOTE that the kernel is not reflected for this operation!
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateIntensityMorphology:
            /* Select Pixel with Maximum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity (yet).
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case IterativeDistanceMorphology:
            /* Work out an iterative distance from black edge of a white image
            ** shape.  Essentually white values are decreased to the smallest
            ** 'distance from edge' it can find.
            **
            ** It works by adding kernel values to the neighbourhood, and and
            ** select the minimum value found. The kernel is rotated before
            ** use, so kernel distances match resulting distances, when a user
            ** provided asymmetric kernel is applied.
            **
            **
            ** This code is almost identical to True GrayScale Morphology But
            ** not quite.
            **
            ** GreyDilate  Kernel values added, maximum value found Kernel is
            ** rotated before use.
            **
            ** GrayErode:  Kernel values subtracted and minimum value found No
            ** kernel rotation used.
            **
            ** Note the the Iterative Distance method is essentially a
            ** GrayErode, but with negative kernel values, and kernel
            ** rotation applied.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                Minimize(result.red,     (*k)+k_pixels[u].red);
                Minimize(result.green,   (*k)+k_pixels[u].green);
                Minimize(result.blue,    (*k)+k_pixels[u].blue);
                Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case UndefinedMorphology:
        default:
            break; /* Do nothing */
      }
      /* Final mathematics of results (combine with original image?)
      **
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here but works better with iteration as a image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so thay can be iterated correctly.
      */
      switch ( method ) {
        case HitAndMissMorphology:
        case ErodeMorphology:
          result = min;    /* minimum of neighbourhood */
          break;
        case DilateMorphology:
          result = max;    /* maximum of neighbourhood */
          break;
        case ThinningMorphology:
          /* subtract pattern match from original */
          result.red     -= min.red;
          result.green   -= min.green;
          result.blue    -= min.blue;
          result.opacity -= min.opacity;
          result.index   -= min.index;
          break;
        case ThickenMorphology:
          /* Add the pattern matchs to the original */
          result.red     += min.red;
          result.green   += min.green;
          result.blue    += min.blue;
          result.opacity += min.opacity;
          result.index   += min.index;
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case UndefinedMorphology:
        case ConvolveMorphology:
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          break;  /* full pixel was directly assigned - not a channel method */
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0
              && image->matte != MagickFalse )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changes[id]++;
      p++;
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
/* MorphologyPrimitiveDirect() applies a morphology primitive (only
** DistanceMorphology and VoronoiMorphology are supported) directly to
** 'image' in place, using two sequential passes: one top-down, then one
** bottom-up.  Each row re-uses the freshly written results of previously
** processed rows, which is why this function must remain single-threaded.
**
** Returns the number of pixels changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
changed,
virt_width;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
/* deliberately trips the assert: caller passed an unsupported method */
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireVirtualCacheView(image,exception);
auth_view=AcquireAuthenticCacheView(image,exception);
/* virtual row is padded by the kernel width so edge pixels have a full
** neighbourhood (virtual-pixel handling fills the overhang) */
virt_width=image->columns+kernel->width-1;
/* Pass 1: top-down.  Only the top half of the kernel (rows <= offy) is
** applied, plus the already-written pixels to the left on this row. */
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
/* NOTE(review): for Distance the opacity channel is inverted here so it
** acts as an alpha/intensity value for the minimisation below; it is
** inverted back when assigned via SetPixelAlpha() -- confirm */
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, while coping the color
** values of the closest pixel.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel so that alpha can
** also be used as part of the results.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
/* Pass 2: bottom-up.  Only the bottom half of the kernel (rows >= offy)
** is applied, plus already-written pixels to the right on this row. */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel will be processes as we
** up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to origin in 'p'. while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, coping the closest color.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
/* -1 signals failure to the caller (MorphologyApply checks changed < 0) */
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
/* MorphologyApply() applies 'method' (a primitive, or a compound method
** built from several primitive stages) to 'image' using 'kernel'.
** 'iterations' < 0 means iterate until no pixels change; multi-kernel
** results are merged with 'compose' (or the method's default).  'bias'
** is the convolution output bias.  Returns a new image, or NULL on error
** (and also for the 'iterations == 0' no-op case).
**
** Ownership note: 'curr_image'/'work_image'/'save_image'/'rslt_image'
** may alias each other or the (const) input image; the goto cleanup
** blocks at the end are careful to destroy each image exactly once.
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THUR */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primative */
break;
default:
break;
}
/* Apply special methods with special requirments
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
/* Distance/Voronoi run once, in place, via MorphologyPrimitiveDirect() */
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose != MagickFalse )
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morpholgy methods
** erose, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primative to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem stange to convert a Correlation into a
** Convolution as the Correlation is the simplier method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose != MagickFalse ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose != MagickFalse ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
/* NOTE(review): redundant double "(void)" cast, harmless */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose != MagickFalse && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primative (staging) Loop for Coumpound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose != MagickFalse ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method interation */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
/* on error: destroy the result image, avoiding a double-destroy when
** curr_image aliases rslt_image */
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
/* destroy intermediates that are not the result and not the caller's
** input image; the reflected kernel clone is always ours to free */
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user-supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and Correlation
% ("-bias" or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showKernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
** Convenience wrapper: apply the given morphology method to the default
** channels.  Identical to calling MorphologyImageChannel() with
** DefaultChannels; returns the new image, or NULL on error.
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
/*
** MorphologyImageChannel() applies a user-supplied kernel to the selected
** channels of 'image' according to the given morphology method, after
** honouring the user (artifact) settings: 'convolve:bias',
** 'convolve:scale' (which may force a private clone of the kernel), the
** various 'showKernel' artifacts, and 'morphology:compose' for merging
** multi-kernel results.  Returns a new image, or NULL on error.
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;
  CompositeOperator
    compose;
  double
    bias;
  Image
    *morphology_image;
  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      const char
        *artifact;
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        /* BUGFIX: on clone failure curr_kernel is NULL; the old code
        ** called DestroyKernelInfo(NULL), which trips its non-NULL
        ** assertion -- just report failure instead. */
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }
  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) )
    ShowKernelInfo(curr_kernel);
  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;
    compose = UndefinedCompositeOp; /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, bias, exception);
  /* Cleanup and Exit: free the kernel only if we cloned it above */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/* RotateKernelInfo() rotates the given kernel, and every kernel following it
** in a multi-kernel list, by the given angle (degrees).  The rotation is
** applied as a sequence of reductions: an optional 45 degree step (3x3
** kernels only), an optional 90 degree step (1-D or square kernels), and an
** optional 180 degree reflection.  Rotations that would be no-ops for
** specific built-in kernel shapes are skipped entirely.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* recursively rotate the rest of the multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* normalize the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by a 45 degree angle: cyclic shift of the
          ** eight outer values around the ring; the centre entry
          ** (values[4]) is untouched.
          */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate a non-centered origin through the same 45 degree step */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;   /* origin relative to the centre */
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }

  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* track how much of the requested angle remains to be applied */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);   /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);    /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees: four-way cyclic
          ** exchange of mirrored positions, working inward ring by ring.
          */
          { register size_t
              i,j,x,y;
            register double
              *k,t;
            k=kernel->values;
            for( i=0, x=kernel->width-1; i<=x; i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);   /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;
      register double
        *k;
      size_t
        i,
        j;
      k=kernel->values;
      /* reverse the flattened value array in place */
      for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;
      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      /* NOTE(review): the comment said "angle+180" but the code subtracts
       * 180.  For angle in (135,225] both land in the +/-45 band mod 360,
       * and 'angle' is not used again below, so this is harmless — but
       * confirm against upstream, which adds 180.0 here. */
      angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%     void ScaleGeometryKernelInfo(KernelInfo *kernel,
%           const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
/*
  ScaleGeometryKernelInfo(): parse a user geometry string (typically from
  "-set option:convolve:scale {geometry}") and apply it to the kernel.  The
  first value, together with any '!'/'^' normalization flags, is handed to
  ScaleKernelInfo(); the optional second value blends in a scaled unity
  kernel via UnityAddKernelInfo().
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  GeometryFlags
    parse_flags;

  SetGeometryInfo(&geometry_info);
  parse_flags = (GeometryFlags) ParseGeometry(geometry, &geometry_info);

  /* a '%' in the geometry means both values were given as percentages */
  if ( (parse_flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* supply defaults for any missing arguments */
  if ( (parse_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parse_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* scale and/or normalize the kernel as requested */
  ScaleKernelInfo(kernel, geometry_info.rho, parse_flags);

  /* blend in the unity kernel only when a second argument was given */
  if ( (parse_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel
% will be scaled by just the sum of the postive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/*
  ScaleKernelInfo(): scale every kernel in the list by 'scaling_factor',
  optionally normalizing first according to 'normalize_flags':
    NormalizeValue          - divide by the kernel sum (or, for zero-summing
                              kernels, by the sum of positive values only).
    CorrelateNormalizeValue - scale positive and negative values separately
                              so the kernel becomes zero-summing.
  The kernel's cached positive/negative ranges and min/max are updated to
  match; a negative scaling factor additionally swaps the cached
  positive/negative and max/min pairs, since every value changed sign.
  Assumes *_range/maximum/minimum were set correctly at kernel creation.
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register ssize_t
    i;

  register double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive values alone */
      pos_scale = kernel->positive_range;
  }

  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale each value by sign; NaN entries ('skip' positions) are left alone */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;   /* BUG FIX: was '= 1', losing the saved maximum */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'showKernel' option request.
%
% The format of the ShowKernelInfo method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if ( IsNaN(k->values[i]) )
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y A d d K e r n e l I n f o                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%     void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
/*
  UnityAddKernelInfo(): add 'scale' times the unity kernel (a single 1.0 at
  the kernel origin) to every kernel in the multi-kernel list, then refresh
  each kernel's cached meta-data.
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *curr;

  /* walk the multi-kernel list, updating each kernel independently */
  for (curr = kernel; curr != (KernelInfo *) NULL; curr = curr->next)
  {
    /* the unity kernel contributes only at the origin element */
    curr->values[curr->x+curr->y*curr->width] += scale;
    CalcKernelMetaData(curr);   /* recalculate the meta-data */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/*
  ZeroKernelNans(): replace every special 'nan' value in the kernel (and in
  every kernel following it in a multi-kernel list) with zero, e.g. before
  handing the kernel to hardware (GPU) convolution code.
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *curr;

  size_t
    count,
    idx;

  /* walk the multi-kernel list, scrubbing each value array */
  for (curr = kernel; curr != (KernelInfo *) NULL; curr = curr->next)
  {
    count = curr->width*curr->height;
    for (idx = 0; idx < count; idx++)
      if ( IsNaN(curr->values[idx]) )
        curr->values[idx] = 0.0;
  }
}
|
eltwise_hcl_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#ifndef __ELTWISE_HCL_ARM_H__
#define __ELTWISE_HCL_ARM_H__
#include <arm_neon.h>
#include "tengine_ir.h"
#include "eltwise_param.h"
/* perf_eltwise_fp32: element-wise addition of two fp32 tensors,
 * output[i] = input0[i] + input1[i], iterating batch by batch with the
 * per-channel work split across OpenMP threads.
 *
 * NOTE(review): 'eltwise_param' is never read here — only the SUM operation
 * is implemented in this kernel; confirm that other eltwise ops are
 * dispatched elsewhere before relying on this for them.
 * Assumes NCHW-style dims[0..3] and that input_tensor1 and output_tensor
 * match input_tensor0's shape (their dims are not checked) — TODO confirm
 * against the op registration/prerun code.
 *
 * Returns 0 unconditionally (no error paths).
 */
int perf_eltwise_fp32(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor0, struct ir_tensor* input_tensor1,
                      struct eltwise_param* eltwise_param, int num_thread)
{
    /* treat zero-sized dims as 1 so lower-rank tensors still iterate once */
    int batch = input_tensor0->dims[0] ? input_tensor0->dims[0] : 1;
    int channel = input_tensor0->dims[1] ? input_tensor0->dims[1] : 1;
    int in_h = input_tensor0->dims[2] ? input_tensor0->dims[2] : 1;
    int in_w = input_tensor0->dims[3] ? input_tensor0->dims[3] : 1;

    int c_step = in_h * in_w;            /* elements per channel plane */
    int b_step = channel * in_h * in_w;  /* elements per batch image   */

    for (int n = 0; n < batch; n++)
    {
        float* input0 = (float*)input_tensor0->data + n * b_step;
        float* input1 = (float*)input_tensor1->data + n * b_step;
        float* output = (float*)output_tensor->data + n * b_step;

/* channel planes are independent, so they parallelize cleanly */
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channel; q++)
        {
            float* input0_data = input0 + q * c_step;
            float* input1_data = input1 + q * c_step;
            float* output_data = output + q * c_step;
#if __ARM_NEON
            int nn = c_step >> 2;            /* number of 4-float vector steps */
            int remain = c_step - (nn << 2); /* scalar tail after the vectors  */
#else
            int remain = c_step;
#endif
#if __ARM_NEON
            /* vectorized body: add four floats per iteration */
            for (; nn > 0; nn--)
            {
                float32x4_t data0 = vld1q_f32(input0_data);
                float32x4_t data1 = vld1q_f32(input1_data);
                float32x4_t sum = vaddq_f32(data0, data1);
                vst1q_f32(output_data, sum);
                input0_data += 4;
                input1_data += 4;
                output_data += 4;
            }
#endif
            /* scalar tail (or the whole plane when NEON is unavailable) */
            for (; remain > 0; remain--)
            {
                output_data[0] = input0_data[0] + input1_data[0];
                input0_data += 1;
                input1_data += 1;
                output_data += 1;
            }
        }
    }

    return 0;
}
#endif
|
GraphBLAS.h | //------------------------------------------------------------------------------
// GraphBLAS.h: definitions for the GraphBLAS package
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS is a complete implementation of the GraphBLAS
// standard, which defines a set of sparse matrix operations on an extended
// algebra of semirings, using an almost unlimited variety of operators and
// types. When applied to sparse adjacency matrices, these algebraic
// operations are equivalent to computations on graphs. GraphBLAS provides a
// powerful and expressive framework creating graph algorithms based on the
// elegant mathematics of sparse matrix operations on a semiring.
// This GraphBLAS.h file contains GraphBLAS definitions for user applications
// to #include. A few functions and variables with the prefix GB_ need to be
// defined in this file and are thus technically visible to the user, but they
// must not be accessed in user code. They are here only so that the ANSI C11
// _Generic feature can be used in the user-accessible polymorphic functions,
// or to implement a fast GxB_Iterator using macros.
// This implementation conforms to the GraphBLAS API Specification and also
// includes functions and features that are extensions to the spec, which are
// given names of the form GxB_* for functions, built-in objects, and macros,
// so it is clear which are in the spec and which are extensions. Extensions
// with the name GxB_* are user-accessible in SuiteSparse:GraphBLAS but cannot
// be guaranteed to appear in all GraphBLAS implementations.
// Regarding "historical" functions and symbols: when a GxB_* function or
// symbol is added to the C API Specification, the new GrB_* name should be
// used instead. The old GxB_* name will be kept for historical reasons,
// documented here and in working order; it might no longer be mentioned in the
// user guide. Historical functions and symbols would only be removed in the
// rare case that they cause a serious conflict with future methods.
#ifndef GRAPHBLAS_H
#define GRAPHBLAS_H
//==============================================================================
// include files required by GraphBLAS
//==============================================================================
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stddef.h>
#include <limits.h>
#include <math.h>
#include <stdarg.h>
//==============================================================================
// renaming for use in R2021a or later
//==============================================================================
#define GB_CAT2(x,y) x ## y
#define GB_EVAL2(x,y) GB_CAT2 (x,y)
#ifdef GBRENAME
// All symbols must be renamed for the @GrB interface when using
// R2021a and following, since those versions include an earlier
// version of SuiteSparse:GraphBLAS.
#define GB(x) GB_EVAL2 (GM_, x)
#define GRB(x) GB_EVAL2 (GrM_, x)
#define GXB(x) GB_EVAL2 (GxM_, x)
#define GrB GrM
#define GxB GxM
#include "GB_rename.h"
#else
// Use the standard GraphBLAS prefix.
#define GB(x) GB_EVAL2 (GB_, x)
#define GRB(x) GB_EVAL2 (GrB_, x)
#define GXB(x) GB_EVAL2 (GxB_, x)
#endif
//==============================================================================
// compiler variations
//==============================================================================
// Exporting/importing symbols for Microsoft Visual Studio
#if ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
#ifdef GB_LIBRARY
// compiling SuiteSparse:GraphBLAS itself, exporting symbols to user apps
#define GB_PUBLIC extern __declspec ( dllexport )
#else
// compiling the user application, importing symbols from SuiteSparse:GraphBLAS
#define GB_PUBLIC extern __declspec ( dllimport )
#endif
#else
// for other compilers
#define GB_PUBLIC extern
#endif
// GraphBLAS requires an ANSI C11 compiler for its polymorphic functions (using
// the _Generic keyword), but it can be used in an C90 compiler if those
// functions are disabled.
// With ANSI C11 and later, _Generic keyword and polymorphic functions can be
// used. Earlier versions of the language do not have this feature.
#ifdef __STDC_VERSION__
// ANSI C17: 201710L
// ANSI C11: 201112L
// ANSI C99: 199901L
// ANSI C95: 199409L
#define GxB_STDC_VERSION __STDC_VERSION__
#else
// assume ANSI C90 / C89
#define GxB_STDC_VERSION 199001L
#endif
//------------------------------------------------------------------------------
// definitions for complex types, and restrict keyword
//------------------------------------------------------------------------------
#undef GB_restrict
// See:
// https://www.drdobbs.com/complex-arithmetic-in-the-intersection-o/184401628#
#if defined ( __cplusplus )
extern "C++"
{
// C++ complex types
#include <cmath>
#include <complex>
#undef I
typedef std::complex<float> GxB_FC32_t ;
typedef std::complex<double> GxB_FC64_t ;
}
#define GxB_CMPLXF(r,i) GxB_FC32_t(r,i)
#define GxB_CMPLX(r,i) GxB_FC64_t(r,i)
#define GB_restrict
#elif ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
// Microsoft Windows complex types
#include <complex.h>
#undef I
typedef _Fcomplex GxB_FC32_t ;
typedef _Dcomplex GxB_FC64_t ;
#define GxB_CMPLXF(r,i) (_FCbuild (r,i))
#define GxB_CMPLX(r,i) ( _Cbuild (r,i))
#define GB_restrict __restrict
#else
// ANSI C11 complex types
#include <complex.h>
#undef I
typedef float complex GxB_FC32_t ;
typedef double complex GxB_FC64_t ;
#ifndef CMPLX
// gcc 6.2 on the Mac doesn't #define CMPLX
#define GxB_CMPLX(r,i) \
((GxB_FC64_t)((double)(r)) + (GxB_FC64_t)((double)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLX macro
#define GxB_CMPLX(r,i) CMPLX (r,i)
#endif
#ifndef CMPLXF
// gcc 6.2 on the Mac doesn't #define CMPLXF
#define GxB_CMPLXF(r,i) \
((GxB_FC32_t)((float)(r)) + (GxB_FC32_t)((float)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLXF macro
#define GxB_CMPLXF(r,i) CMPLXF (r,i)
#endif
// restrict keyword
#if defined ( __NVCC__ )
// NVIDIA nvcc
#define GB_restrict __restrict__
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 or later
#define GB_restrict restrict
#else
// ANSI C95 and earlier: no restrict keyword
#define GB_restrict
#endif
#endif
//==============================================================================
// version control
//==============================================================================
// There are two version numbers that user codes can check against with
// compile-time #if tests: the version of this GraphBLAS implementation,
// and the version of the GraphBLAS specification it conforms to. User code
// can use tests like this:
//
// #if GxB_SPEC_VERSION >= GxB_VERSION (2,0,3)
// ... use features in GraphBLAS specification 2.0.3 ...
// #else
// ... only use features in early specifications
// #endif
//
// #if GxB_IMPLEMENTATION > GxB_VERSION (1,4,0)
// ... use features from version 1.4.0 of a GraphBLAS package
// #endif
// X_GRAPHBLAS: names this particular implementation:
#define GxB_SUITESPARSE_GRAPHBLAS
// GxB_VERSION: a single integer for comparing spec and version levels
#define GxB_VERSION(major,minor,sub) \
(((major)*1000ULL + (minor))*1000ULL + (sub))
// The version of this implementation, and the GraphBLAS API version:
#define GxB_IMPLEMENTATION_NAME "SuiteSparse:GraphBLAS"
#define GxB_IMPLEMENTATION_DATE "Feb 14, 2022"
#define GxB_IMPLEMENTATION_MAJOR 6
#define GxB_IMPLEMENTATION_MINOR 2
#define GxB_IMPLEMENTATION_SUB 0
#define GxB_SPEC_DATE "Nov 15, 2021"
#define GxB_SPEC_MAJOR 2
#define GxB_SPEC_MINOR 0
#define GxB_SPEC_SUB 0
// compile-time access to the C API Version number of this library.
#define GRB_VERSION GxB_SPEC_MAJOR
#define GRB_SUBVERSION GxB_SPEC_MINOR
#define GxB_IMPLEMENTATION \
GxB_VERSION (GxB_IMPLEMENTATION_MAJOR, \
GxB_IMPLEMENTATION_MINOR, \
GxB_IMPLEMENTATION_SUB)
// The 'about' string the describes this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_ABOUT \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nhttp://suitesparse.com Dept of Computer Sci. & Eng, Texas A&M University.\n"
// The GraphBLAS license for this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_LICENSE \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\n"\
"not use SuiteSparse:GraphBLAS except in compliance with the License. You\n" \
"may obtain a copy of the License at\n\n" \
" http://www.apache.org/licenses/LICENSE-2.0\n\n" \
"Unless required by applicable law or agreed to in writing, software\n" \
"distributed under the License is distributed on an \"AS IS\" BASIS,\n" \
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" \
"See the License for the specific language governing permissions and\n" \
"limitations under the License.\n"
//------------------------------------------------------------------------------
// GraphBLAS C API version
//------------------------------------------------------------------------------
#define GxB_SPEC_VERSION GxB_VERSION(GxB_SPEC_MAJOR,GxB_SPEC_MINOR,GxB_SPEC_SUB)
// The 'spec' string describes the GraphBLAS spec:
#define GxB_SPEC_ABOUT \
"GraphBLAS C API, by Aydin Buluc, Timothy Mattson, Scott McMillan,\n" \
"Jose' Moreira, Carl Yang, and Benjamin Brock. Based on 'GraphBLAS\n" \
"Mathematics by Jeremy Kepner. See also 'Graph Algorithms in the Language\n" \
"of Linear Algebra,' edited by J. Kepner and J. Gilbert, SIAM, 2011.\n"
//==============================================================================
// GrB_Index: the GraphBLAS integer
//==============================================================================
// GrB_Index: row or column index, or matrix dimension. This typedef is used
// for row and column indices, or matrix and vector dimensions.
typedef uint64_t GrB_Index ;
// GrB_INDEX_MAX is the largest permissible index value. The largest valid
// matrix or vector dimension is GrB_INDEX_MAX+1, or 2^60 in SuiteSparse:GrB.
#define GrB_INDEX_MAX ((GrB_Index) (1ULL << 60) - 1)
// GxB_INDEX_MAX is historical; use GrB_INDEX_MAX+1 instead. It differs by one
// from GrB_INDEX_MAX, since it defined the largest valid matrix or vector
// dimension.
#define GxB_INDEX_MAX ((GrB_Index) (1ULL << 60))
//==============================================================================
// GraphBLAS error and informational codes
//==============================================================================
// All GraphBLAS functions return a code that indicates if it was successful
// or not. If more information is required, the GrB_error function can be
// called, which returns a string that provides more information on the last
// return value from GraphBLAS.
// The v1.3 C API did not specify the enum values, but they appear in v2.0.
// Changing them will require SuiteSparse:GraphBLAS to bump to v6.x.
// Error codes GrB_NOT_IMPLEMENTED and GrB_EMPTY_OBJECT are new to v2.0.
// GrB_Info: status code returned by every GraphBLAS function.  Zero means
// success, positive values are informational (not errors), and negative
// values are errors.  These numeric values are fixed by the v2.0 C API
// specification (v1.3 left them unspecified), so changing them is a
// breaking change.
typedef enum
{

    GrB_SUCCESS = 0,            // all is well

    //--------------------------------------------------------------------------
    // informational codes, not an error:
    //--------------------------------------------------------------------------

    GrB_NO_VALUE = 1,           // A(i,j) requested but not there
    GxB_EXHAUSTED = 2,          // iterator is exhausted

    //--------------------------------------------------------------------------
    // errors:
    //--------------------------------------------------------------------------

    GrB_UNINITIALIZED_OBJECT = -1,  // object has not been initialized
    GrB_NULL_POINTER = -2,          // input pointer is NULL
    GrB_INVALID_VALUE = -3,         // generic error; some value is bad
    GrB_INVALID_INDEX = -4,         // row or column index is out of bounds
    GrB_DOMAIN_MISMATCH = -5,       // object domains are not compatible
    GrB_DIMENSION_MISMATCH = -6,    // matrix dimensions do not match
    GrB_OUTPUT_NOT_EMPTY = -7,      // output matrix already has values
    GrB_NOT_IMPLEMENTED = -8,       // method not implemented
    GrB_PANIC = -101,               // unknown error
    GrB_OUT_OF_MEMORY = -102,       // out of memory
    GrB_INSUFFICIENT_SPACE = -103,  // output array not large enough
    GrB_INVALID_OBJECT = -104,      // object is corrupted
    GrB_INDEX_OUT_OF_BOUNDS = -105, // row or col index out of bounds
    GrB_EMPTY_OBJECT = -106         // an object does not contain a value
}
GrB_Info ;
//==============================================================================
// GrB_init / GrB_finalize
//==============================================================================
// GrB_init must called before any other GraphBLAS operation. GrB_finalize
// must be called as the last GraphBLAS operation.
// GrB_init defines the mode that GraphBLAS will use: blocking or
// non-blocking. With blocking mode, all operations finish before returning to
// the user application. With non-blocking mode, operations can be left
// pending, and are computed only when needed.
// The extension GxB_init does the work of GrB_init, but it also defines the
// memory management functions that SuiteSparse:GraphBLAS will use internally.
// GrB_Mode: the execution mode, selected once by GrB_init or GxB_init.
typedef enum
{
GrB_NONBLOCKING = 0, // methods may return with pending computations
GrB_BLOCKING = 1 // no computations are ever left pending
}
GrB_Mode ;
GB_PUBLIC
GrB_Info GrB_init // start up GraphBLAS
(
GrB_Mode mode // blocking or non-blocking mode
) ;
// GxB_init does the work of GrB_init, and additionally lets the user
// application provide the malloc/calloc/realloc/free functions that
// GraphBLAS will use internally for all of its memory management.
GB_PUBLIC
GrB_Info GxB_init // start up GraphBLAS and also define malloc, etc
(
GrB_Mode mode, // blocking or non-blocking mode
// pointers to memory management functions
void * (* user_malloc_function ) (size_t),
void * (* user_calloc_function ) (size_t, size_t),
void * (* user_realloc_function ) (void *, size_t),
void (* user_free_function ) (void *)
) ;
// GrB_finalize must be the last GraphBLAS operation; no GraphBLAS method
// may be called after it.
GB_PUBLIC
GrB_Info GrB_finalize (void) ; // finish GraphBLAS
//==============================================================================
// GrB_getVersion: GraphBLAS C API version
//==============================================================================
// GrB_getVersion provides runtime access to the C API version.
GB_PUBLIC
GrB_Info GrB_getVersion // runtime access to C API version number
(
unsigned int *version, // returns GRB_VERSION
unsigned int *subversion // returns GRB_SUBVERSION
) ;
//==============================================================================
// GrB_Descriptor: the GraphBLAS descriptor
//==============================================================================
// The GrB_Descriptor is used to modify the behavior of GraphBLAS operations.
//
// GrB_OUTP: can be GxB_DEFAULT or GrB_REPLACE. If GrB_REPLACE, then C is
// cleared after taking part in the accum operation but before the mask.
// In other words, C<Mask> = accum (C,T) is split into Z = accum(C,T) ;
// C=0 ; C<Mask> = Z.
//
// GrB_MASK: can be GxB_DEFAULT, GrB_COMP, GrB_STRUCTURE, or set to both
// GrB_COMP and GrB_STRUCTURE. If GxB_DEFAULT, the mask is used
// normally, where Mask(i,j)=1 means C(i,j) can be modified by C<Mask>=Z,
// and Mask(i,j)=0 means it cannot be modified even if Z(i,j) has been
// computed and differs from C(i,j). If GrB_COMP, this is the same as
// taking the logical complement of the Mask. If GrB_STRUCTURE is set,
// the value of the mask is not considered, just its pattern. The
// GrB_COMP and GrB_STRUCTURE settings can be combined.
//
// GrB_INP0: can be GxB_DEFAULT or GrB_TRAN. If GxB_DEFAULT, the first input
// is used as-is. If GrB_TRAN, it is transposed. Only matrices are
// transposed this way. Vectors are never transposed via the
// GrB_Descriptor.
//
// GrB_INP1: the same as GrB_INP0 but for the second input
//
// GxB_NTHREADS: the maximum number of threads to use in the current method.
// If <= GxB_DEFAULT (which is zero), then the number of threads is
// determined automatically. This is the default value.
//
// GxB_CHUNK: an integer parameter that determines the number of threads to use
// for a small problem. If w is the work to be performed, and chunk is
// the value of this parameter, then the # of threads is limited to floor
// (w/chunk). The default chunk is currently 64K, but this may change in
// the future. If chunk is set to <= GxB_DEFAULT (that is, zero), the
// default is used.
//
// GxB_AxB_METHOD: this is a hint to SuiteSparse:GraphBLAS on which algorithm
// it should use to compute C=A*B, in GrB_mxm, GrB_mxv, and GrB_vxm.
// SuiteSparse:GraphBLAS has four different heuristics, and the default
// method (GxB_DEFAULT) selects between them automatically. The complete
// rule is in the User Guide. The brief discussion here assumes all
// matrices are stored by column. All methods compute the same result,
// except that floating-point roundoff may differ when working on
// floating-point data types.
//
// GxB_AxB_SAXPY: C(:,j)=A*B(:,j) is computed using a mix of Gustavson
// and Hash methods. Each task in the parallel computation makes its
// own decision between these two methods, via a heuristic.
//
// GxB_AxB_GUSTAVSON: This is the same as GxB_AxB_SAXPY, except that
// every task uses Gustavon's method, computing C(:,j)=A*B(:,j) via a
// gather/scatter workspace of size equal to the number of rows of A.
// Very good general-purpose method, but sometimes the workspace can
// be too large when many threads are used.
//
// GxB_AxB_HASH: This is the same as GxB_AxB_SAXPY, except that every
// task uses the Hash method. It is very good for hypersparse
// matrices and uses very little workspace, and so it scales well to
// many threads.
//
// GxB_AxB_DOT: computes C(i,j) = A(:,i)'*B(:,j), for each entry C(i,j).
// A very specialized method that works well only if the mask is
// present, very sparse, and not complemented, or when C is a dense
// vector or matrix, or when C is small.
//
// GxB_SORT: GrB_mxm and other methods may return a matrix in a 'jumbled'
// state, with indices out of order. The sort is left pending. Some
// methods can tolerate jumbled matrices on input, so this can be faster.
// However, in some cases, it can be faster for GrB_mxm to sort its output
// as it is computed. With GxB_SORT set to GxB_DEFAULT, the sort is left
// pending. With GxB_SORT set to a nonzero value, GrB_mxm typically sorts
// the resulting matrix C (but not always; this is just a hint). If
// GrB_init is called with GrB_BLOCKING mode, the sort will always be
// done, and this setting has no effect.
//
// GxB_COMPRESSION: compression method for GxB_Matrix_serialize and
// GxB_Vector_serialize. The default is LZ4.
//
// GxB_IMPORT: GxB_FAST_IMPORT (faster, for trusted input data) or
// GxB_SECURE_IMPORT (slower, for untrusted input data), for the
// GxB*_pack* methods.
// The following are enumerated values in both the GrB_Desc_Field and the
// GxB_Option_Field for global options. They are defined with the same integer
// value for both enums, so the user can use them for both.
// Note: these macros are field codes (selectors for set/get methods), not
// the thread-count or chunk settings themselves.
#define GxB_NTHREADS 5
#define GxB_CHUNK 7
// GPU control (DRAFT: in progress, do not use)
#define GxB_GPU_CONTROL 21
#define GxB_GPU_CHUNK 22
// GrB_Desc_Field: the fields of a GrB_Descriptor that can be set or queried.
// Fix: removed the trailing comma after the last enumerator (GxB_IMPORT).
// A comma after the final enumerator is invalid in C89 and in C++ prior to
// C++11, and every other enum in this header omits it.
typedef enum
{
GrB_OUTP = 0, // descriptor for output of a method
GrB_MASK = 1, // descriptor for the mask input of a method
GrB_INP0 = 2, // descriptor for the first input of a method
GrB_INP1 = 3, // descriptor for the second input of a method
GxB_DESCRIPTOR_NTHREADS = GxB_NTHREADS, // max number of threads to use.
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_DESCRIPTOR_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
// GPU control (DRAFT: in progress, do not use)
GxB_DESCRIPTOR_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_DESCRIPTOR_GPU_CHUNK = GxB_GPU_CHUNK,
GxB_AxB_METHOD = 1000, // descriptor for selecting C=A*B algorithm
GxB_SORT = 35, // control sort in GrB_mxm
GxB_COMPRESSION = 36, // select compression for serialize
GxB_IMPORT = 37 // secure vs fast import
}
GrB_Desc_Field ;
// GrB_Desc_Value: the values that can be assigned to a descriptor field.
typedef enum
{
// for all GrB_Descriptor fields:
GxB_DEFAULT = 0, // default behavior of the method
// for GrB_OUTP only:
GrB_REPLACE = 1, // clear the output before assigning new values to it
// for GrB_MASK only:
GrB_COMP = 2, // use the structural complement of the input
GrB_STRUCTURE = 4, // use only the pattern of the mask, not its values
// for GrB_INP0 and GrB_INP1 only:
GrB_TRAN = 3, // use the transpose of the input
// for GxB_GPU_CONTROL only (DRAFT: in progress, do not use)
GxB_GPU_ALWAYS = 2001,
GxB_GPU_NEVER = 2002,
// for GxB_AxB_METHOD only:
GxB_AxB_GUSTAVSON = 1001, // gather-scatter saxpy method
GxB_AxB_DOT = 1003, // dot product
GxB_AxB_HASH = 1004, // hash-based saxpy method
GxB_AxB_SAXPY = 1005, // saxpy method (any kind)
// for GxB_IMPORT only:
GxB_SECURE_IMPORT = 502 // GxB*_pack* methods do not trust their input data
}
GrB_Desc_Value ;
// default for GxB pack is to trust the input data
#define GxB_FAST_IMPORT GxB_DEFAULT
// GrB_Descriptor: an opaque object; its contents are accessed only through
// the GrB_Descriptor_* and GxB_Desc_* methods below.
typedef struct GB_Descriptor_opaque *GrB_Descriptor ;
GB_PUBLIC
GrB_Info GrB_Descriptor_new // create a new descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to create
) ;
GB_PUBLIC
GrB_Info GrB_Descriptor_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
GrB_Desc_Value val // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_get // get a parameter from a descriptor
(
GrB_Desc_Value *val, // value of the parameter
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field // parameter to query
) ;
// GxB_Desc_set is like GrB_Descriptor_set, except that the value is passed
// as a variadic argument, so it can also accept settings that are not a
// GrB_Desc_Value (for example, the numeric GxB_NTHREADS / GxB_CHUNK values).
GB_PUBLIC
GrB_Info GxB_Desc_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Desc_get // get a parameter from a descriptor
(
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field, // parameter to query
... // value of the parameter
) ;
// Freeing a predefined descriptor (GrB_DESC_*) is silently ignored.
GB_PUBLIC
GrB_Info GrB_Descriptor_free // free a descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to free
) ;
// Predefined descriptors and their values:
// These objects are owned by GraphBLAS: they may be passed to any method
// taking a GrB_Descriptor, but must not be modified or freed by the user.
GB_PUBLIC
GrB_Descriptor // OUTP MASK MASK INP0 INP1
// structural complement
// =========== ============== ========== ======== ========
// GrB_NULL // - - - - -
GrB_DESC_T1 , // - - - - GrB_TRAN
GrB_DESC_T0 , // - - - GrB_TRAN -
GrB_DESC_T0T1 , // - - - GrB_TRAN GrB_TRAN
GrB_DESC_C , // - - GrB_COMP - -
GrB_DESC_CT1 , // - - GrB_COMP - GrB_TRAN
GrB_DESC_CT0 , // - - GrB_COMP GrB_TRAN -
GrB_DESC_CT0T1 , // - - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_S , // - GrB_STRUCTURE - - -
GrB_DESC_ST1 , // - GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_ST0 , // - GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_ST0T1 , // - GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_SC , // - GrB_STRUCTURE GrB_COMP - -
GrB_DESC_SCT1 , // - GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_SCT0 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_SCT0T1 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_R , // GrB_REPLACE - - - -
GrB_DESC_RT1 , // GrB_REPLACE - - - GrB_TRAN
GrB_DESC_RT0 , // GrB_REPLACE - - GrB_TRAN -
GrB_DESC_RT0T1 , // GrB_REPLACE - - GrB_TRAN GrB_TRAN
GrB_DESC_RC , // GrB_REPLACE - GrB_COMP - -
GrB_DESC_RCT1 , // GrB_REPLACE - GrB_COMP - GrB_TRAN
GrB_DESC_RCT0 , // GrB_REPLACE - GrB_COMP GrB_TRAN -
GrB_DESC_RCT0T1 , // GrB_REPLACE - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_RS , // GrB_REPLACE GrB_STRUCTURE - - -
GrB_DESC_RST1 , // GrB_REPLACE GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_RST0 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_RST0T1 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_RSC , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - -
GrB_DESC_RSCT1 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_RSCT0 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_RSCT0T1 ; // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
// GrB_NULL is the default descriptor, with all settings at their defaults:
//
// OUTP: do not replace the output
// MASK: mask is valued and not complemented
// INP0: first input not transposed
// INP1: second input not transposed
// Predefined descriptors may not be modified or freed. Attempting to modify
// them results in an error (GrB_INVALID_VALUE). Attempts to free them are
// silently ignored.
//==============================================================================
// GrB_Type: data types
//==============================================================================
// GrB_Type: an opaque object describing a scalar type. Use one of the
// predefined types below, or create a user-defined type with GrB_Type_new.
typedef struct GB_Type_opaque *GrB_Type ;
// GraphBLAS predefined types and their counterparts in pure C:
GB_PUBLIC GrB_Type
GrB_BOOL , // in C: bool
GrB_INT8 , // in C: int8_t
GrB_INT16 , // in C: int16_t
GrB_INT32 , // in C: int32_t
GrB_INT64 , // in C: int64_t
GrB_UINT8 , // in C: uint8_t
GrB_UINT16 , // in C: uint16_t
GrB_UINT32 , // in C: uint32_t
GrB_UINT64 , // in C: uint64_t
GrB_FP32 , // in C: float
GrB_FP64 , // in C: double
GxB_FC32 , // in C: float complex
GxB_FC64 ; // in C: double complex
//------------------------------------------------------------------------------
// helper macros for polymorphic functions
//------------------------------------------------------------------------------
// GB_CAT pastes its four tokens together; GB_CONCAT expands its arguments
// first and then pastes them (the standard two-level token-paste idiom).
#define GB_CAT(w,x,y,z) w ## x ## y ## z
#define GB_CONCAT(w,x,y,z) GB_CAT (w, x, y, z)
// GB_CASES expands to the case list of a C11 _Generic expression, mapping
// each built-in type (p is the pointer declarator) to the type-suffixed
// function prefix_func_TYPE. The complex and user-defined cases always use
// a fixed prefix (GxB for complex), regardless of the prefix argument.
// Only available when the compiler supports C11 generics.
#if GxB_STDC_VERSION >= 201112L
#define GB_CASES(p,prefix,func) \
const bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
const int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
const int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
const int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
const int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
const uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
const uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
const uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
const uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
const float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
const double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
const GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
const GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
const void * : GB_CONCAT ( prefix, _, func, _UDT ), \
void * : GB_CONCAT ( prefix, _, func, _UDT )
#endif
//------------------------------------------------------------------------------
// GrB_Type_new: create a new type
//------------------------------------------------------------------------------
// GrB_Type_new is implemented both as a macro and a function. Both are
// user-callable. The default is to use the macro, since this allows the name
// of the type to be saved as a string, for subsequent error reporting by
// GrB_error.
#undef GrB_Type_new
#undef GrM_Type_new
GB_PUBLIC
GrB_Info GRB (Type_new) // create a new GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype // size = sizeof (ctype) of the C type
) ;
// user code should not directly use GB_STR or GB_XSTR
// GB_STR: convert the content of x into a string "x"
// GB_XSTR: expand x first, then stringify it (two-level stringify idiom)
#define GB_XSTR(x) GB_STR(x)
#define GB_STR(x) #x
// GrB_Type_new as a user-callable macro, which allows the name of the ctype
// to be added to the new type. The type_defn is unknown.
#define GrB_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
#define GrM_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
// GxB_Type_new creates a type with a name and definition that are known to
// GraphBLAS, as strings. The type_name is any valid string (max length of 128
// characters, including the required null-terminating character) that may
// appear as the name of a C type created by a C "typedef" statement. It must
// not contain any white-space characters. Example, creating a type of size
// 16*4+4 = 68 bytes, with a 4-by-4 dense float array and a 32-bit integer:
//
// typedef struct { float x [4][4] ; int color ; } myquaternion ;
// GrB_Type MyQtype ;
// GxB_Type_new (&MyQtype, sizeof (myquaternion), "myquaternion",
// "typedef struct { float x [4][4] ; int color ; } myquaternion ;") ;
//
// The type_name and type_defn are both null-terminated strings. Currently,
// type_defn is unused, but it will be required for best performance when a JIT
// is implemented in SuiteSparse:GraphBLAS (both on the CPU and GPU). User
// defined types created by GrB_Type_new will not work with a JIT.
//
// At most GxB_MAX_NAME_LEN characters are accessed in type_name; characters
// beyond that limit are silently ignored.
// GxB_MAX_NAME_LEN: max length of a type name, including the NUL terminator.
#define GxB_MAX_NAME_LEN 128
GB_PUBLIC
GrB_Info GxB_Type_new // create a new named GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size = sizeof (ctype) of the C type
const char *type_name, // name of the type (max 128 characters)
const char *type_defn // typedef for the type (no max length)
) ;
// GB_Type_new is historical: use GxB_Type_new instead
GB_PUBLIC
GrB_Info GB_Type_new // not user-callable
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size of the user type
const char *type_name // name of the type, as "sizeof (ctype)"
) ;
GB_PUBLIC
GrB_Info GxB_Type_name // return the name of a GraphBLAS type
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Type type
) ;
GB_PUBLIC
GrB_Info GxB_Type_size // determine the size of the type
(
size_t *size, // the sizeof the type
const GrB_Type type // type to determine the sizeof
) ;
GB_PUBLIC
GrB_Info GxB_Type_from_name // return the built-in GrB_Type from a name
(
GrB_Type *type, // built-in type, or NULL if user-defined
const char *type_name // array of size at least GxB_MAX_NAME_LEN
) ;
// Freeing a predefined (built-in) type is silently ignored; only
// user-defined types are freed.
GB_PUBLIC
GrB_Info GrB_Type_free // free a user-defined type
(
GrB_Type *type // handle of user-defined type to free
) ;
//==============================================================================
// GrB_UnaryOp: unary operators
//==============================================================================
// GrB_UnaryOp: a function z=f(x). The function f must have the signature:
// void f (void *z, const void *x) ;
// The pointers are void * but they are always pointers to objects of type
// ztype and xtype, respectively. The function must typecast its arguments as
// needed from void* to ztype* and xtype*.
typedef struct GB_UnaryOp_opaque *GrB_UnaryOp ;
//------------------------------------------------------------------------------
// built-in unary operators, z = f(x)
//------------------------------------------------------------------------------
GB_PUBLIC GrB_UnaryOp
// For these functions z=f(x), z and x have the same type.
// The suffix in the name is the type of x and z.
// z = x z = -x z = 1/x z = ! (x != 0)
// identity additive multiplicative logical
// inverse inverse negation
GrB_IDENTITY_BOOL, GrB_AINV_BOOL, GrB_MINV_BOOL, GxB_LNOT_BOOL,
GrB_IDENTITY_INT8, GrB_AINV_INT8, GrB_MINV_INT8, GxB_LNOT_INT8,
GrB_IDENTITY_INT16, GrB_AINV_INT16, GrB_MINV_INT16, GxB_LNOT_INT16,
GrB_IDENTITY_INT32, GrB_AINV_INT32, GrB_MINV_INT32, GxB_LNOT_INT32,
GrB_IDENTITY_INT64, GrB_AINV_INT64, GrB_MINV_INT64, GxB_LNOT_INT64,
GrB_IDENTITY_UINT8, GrB_AINV_UINT8, GrB_MINV_UINT8, GxB_LNOT_UINT8,
GrB_IDENTITY_UINT16, GrB_AINV_UINT16, GrB_MINV_UINT16, GxB_LNOT_UINT16,
GrB_IDENTITY_UINT32, GrB_AINV_UINT32, GrB_MINV_UINT32, GxB_LNOT_UINT32,
GrB_IDENTITY_UINT64, GrB_AINV_UINT64, GrB_MINV_UINT64, GxB_LNOT_UINT64,
GrB_IDENTITY_FP32, GrB_AINV_FP32, GrB_MINV_FP32, GxB_LNOT_FP32,
GrB_IDENTITY_FP64, GrB_AINV_FP64, GrB_MINV_FP64, GxB_LNOT_FP64,
// complex unary operators:
GxB_IDENTITY_FC32, GxB_AINV_FC32, GxB_MINV_FC32, // no LNOT
GxB_IDENTITY_FC64, GxB_AINV_FC64, GxB_MINV_FC64, // for complex
// z = 1 z = abs(x) z = bnot(x) z = signum
// one absolute value bitwise negation
// note: GrB_BNOT_* is declared only for the integer types below
GxB_ONE_BOOL, GrB_ABS_BOOL,
GxB_ONE_INT8, GrB_ABS_INT8, GrB_BNOT_INT8,
GxB_ONE_INT16, GrB_ABS_INT16, GrB_BNOT_INT16,
GxB_ONE_INT32, GrB_ABS_INT32, GrB_BNOT_INT32,
GxB_ONE_INT64, GrB_ABS_INT64, GrB_BNOT_INT64,
GxB_ONE_UINT8, GrB_ABS_UINT8, GrB_BNOT_UINT8,
GxB_ONE_UINT16, GrB_ABS_UINT16, GrB_BNOT_UINT16,
GxB_ONE_UINT32, GrB_ABS_UINT32, GrB_BNOT_UINT32,
GxB_ONE_UINT64, GrB_ABS_UINT64, GrB_BNOT_UINT64,
GxB_ONE_FP32, GrB_ABS_FP32,
GxB_ONE_FP64, GrB_ABS_FP64,
// complex unary operators:
GxB_ONE_FC32, // for complex types, z = abs(x)
GxB_ONE_FC64, // is real; listed below.
// Boolean negation, z = !x, where both z and x are boolean. There is no
// suffix since z and x are only boolean. This operator is identical to
// GxB_LNOT_BOOL; it just has a different name.
GrB_LNOT ;
// GxB_ABS is now in the v1.3 spec, the following names are historical:
// (kept for backward compatibility; use GrB_ABS_* in new code)
GB_PUBLIC GrB_UnaryOp
// z = abs(x)
GxB_ABS_BOOL,
GxB_ABS_INT8,
GxB_ABS_INT16,
GxB_ABS_INT32,
GxB_ABS_INT64,
GxB_ABS_UINT8,
GxB_ABS_UINT16,
GxB_ABS_UINT32,
GxB_ABS_UINT64,
GxB_ABS_FP32,
GxB_ABS_FP64 ;
//------------------------------------------------------------------------------
// Unary operators for floating-point types only
//------------------------------------------------------------------------------
// The following floating-point unary operators, and their C11 <math.h> and
// <complex.h> equivalents, are only defined for floating-point (real and
// complex) types.
GB_PUBLIC GrB_UnaryOp
//--------------------------------------------------------------------------
// z = f(x) where z and x have the same type (all 4 floating-point types)
//--------------------------------------------------------------------------
// z = sqrt (x) z = log (x) z = exp (x) z = log2 (x)
GxB_SQRT_FP32, GxB_LOG_FP32, GxB_EXP_FP32, GxB_LOG2_FP32,
GxB_SQRT_FP64, GxB_LOG_FP64, GxB_EXP_FP64, GxB_LOG2_FP64,
GxB_SQRT_FC32, GxB_LOG_FC32, GxB_EXP_FC32, GxB_LOG2_FC32,
GxB_SQRT_FC64, GxB_LOG_FC64, GxB_EXP_FC64, GxB_LOG2_FC64,
// z = sin (x) z = cos (x) z = tan (x)
GxB_SIN_FP32, GxB_COS_FP32, GxB_TAN_FP32,
GxB_SIN_FP64, GxB_COS_FP64, GxB_TAN_FP64,
GxB_SIN_FC32, GxB_COS_FC32, GxB_TAN_FC32,
GxB_SIN_FC64, GxB_COS_FC64, GxB_TAN_FC64,
// z = acos (x) z = asin (x) z = atan (x)
GxB_ACOS_FP32, GxB_ASIN_FP32, GxB_ATAN_FP32,
GxB_ACOS_FP64, GxB_ASIN_FP64, GxB_ATAN_FP64,
GxB_ACOS_FC32, GxB_ASIN_FC32, GxB_ATAN_FC32,
GxB_ACOS_FC64, GxB_ASIN_FC64, GxB_ATAN_FC64,
// z = sinh (x) z = cosh (x) z = tanh (x)
GxB_SINH_FP32, GxB_COSH_FP32, GxB_TANH_FP32,
GxB_SINH_FP64, GxB_COSH_FP64, GxB_TANH_FP64,
GxB_SINH_FC32, GxB_COSH_FC32, GxB_TANH_FC32,
GxB_SINH_FC64, GxB_COSH_FC64, GxB_TANH_FC64,
// z = acosh (x) z = asinh (x) z = atanh (x) z = signum (x)
GxB_ACOSH_FP32, GxB_ASINH_FP32, GxB_ATANH_FP32, GxB_SIGNUM_FP32,
GxB_ACOSH_FP64, GxB_ASINH_FP64, GxB_ATANH_FP64, GxB_SIGNUM_FP64,
GxB_ACOSH_FC32, GxB_ASINH_FC32, GxB_ATANH_FC32, GxB_SIGNUM_FC32,
GxB_ACOSH_FC64, GxB_ASINH_FC64, GxB_ATANH_FC64, GxB_SIGNUM_FC64,
// z = ceil (x) z = floor (x) z = round (x) z = trunc (x)
GxB_CEIL_FP32, GxB_FLOOR_FP32, GxB_ROUND_FP32, GxB_TRUNC_FP32,
GxB_CEIL_FP64, GxB_FLOOR_FP64, GxB_ROUND_FP64, GxB_TRUNC_FP64,
GxB_CEIL_FC32, GxB_FLOOR_FC32, GxB_ROUND_FC32, GxB_TRUNC_FC32,
GxB_CEIL_FC64, GxB_FLOOR_FC64, GxB_ROUND_FC64, GxB_TRUNC_FC64,
// z = exp2 (x) z = expm1 (x) z = log10 (x) z = log1p (x)
GxB_EXP2_FP32, GxB_EXPM1_FP32, GxB_LOG10_FP32, GxB_LOG1P_FP32,
GxB_EXP2_FP64, GxB_EXPM1_FP64, GxB_LOG10_FP64, GxB_LOG1P_FP64,
GxB_EXP2_FC32, GxB_EXPM1_FC32, GxB_LOG10_FC32, GxB_LOG1P_FC32,
GxB_EXP2_FC64, GxB_EXPM1_FC64, GxB_LOG10_FC64, GxB_LOG1P_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (floating-point real only)
//--------------------------------------------------------------------------
// z = lgamma (x) z = tgamma (x) z = erf (x) z = erfc (x)
GxB_LGAMMA_FP32, GxB_TGAMMA_FP32, GxB_ERF_FP32, GxB_ERFC_FP32,
GxB_LGAMMA_FP64, GxB_TGAMMA_FP64, GxB_ERF_FP64, GxB_ERFC_FP64,
// frexpx and frexpe return the mantissa and exponent, respectively,
// from the C11 frexp function. The exponent is returned as a
// floating-point value, not an integer.
// z = frexpx (x) z = frexpe (x)
GxB_FREXPX_FP32, GxB_FREXPE_FP32,
GxB_FREXPX_FP64, GxB_FREXPE_FP64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (complex only)
//--------------------------------------------------------------------------
// z = conj (x)
GxB_CONJ_FC32,
GxB_CONJ_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is real and x is complex:
//--------------------------------------------------------------------------
// z = creal (x) z = cimag (x) z = carg (x) z = abs (x)
GxB_CREAL_FC32, GxB_CIMAG_FC32, GxB_CARG_FC32, GxB_ABS_FC32,
GxB_CREAL_FC64, GxB_CIMAG_FC64, GxB_CARG_FC64, GxB_ABS_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is bool and x is any floating-point type
//--------------------------------------------------------------------------
// z = isinf (x)
GxB_ISINF_FP32,
GxB_ISINF_FP64,
GxB_ISINF_FC32, // isinf (creal (x)) || isinf (cimag (x))
GxB_ISINF_FC64, // isinf (creal (x)) || isinf (cimag (x))
// z = isnan (x)
GxB_ISNAN_FP32,
GxB_ISNAN_FP64,
GxB_ISNAN_FC32, // isnan (creal (x)) || isnan (cimag (x))
GxB_ISNAN_FC64, // isnan (creal (x)) || isnan (cimag (x))
// z = isfinite (x)
GxB_ISFINITE_FP32,
GxB_ISFINITE_FP64,
GxB_ISFINITE_FC32, // isfinite (real (x)) && isfinite (cimag (x))
GxB_ISFINITE_FC64 ; // isfinite (real (x)) && isfinite (cimag (x))
//------------------------------------------------------------------------------
// methods for unary operators
//------------------------------------------------------------------------------
// GxB_unary_function: the signature of a user-defined unary function z=f(x);
// z and x are passed as void pointers to values of ztype and xtype.
typedef void (*GxB_unary_function) (void *, const void *) ;
// GrB_UnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_UnaryOp_new
#undef GrM_UnaryOp_new
GB_PUBLIC
GrB_Info GRB (UnaryOp_new) // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype // type of input x
) ;
// The macro form stringifies the function name (via GB_STR) so the op's
// name is recorded; the definition (last argument) is left unknown (NULL).
#define GrB_UnaryOp_new(op,f,z,x) \
GxB_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
#define GrM_UnaryOp_new(op,f,z,x) \
GxM_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
// GxB_UnaryOp_new creates a named user-defined unary op.
GB_PUBLIC
GrB_Info GxB_UnaryOp_new // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name, // name of the user function
const char *unop_defn // definition of the user function
) ;
// GB_UnaryOp_new is historical: use GxB_UnaryOp_new instead
GB_PUBLIC
GrB_Info GB_UnaryOp_new // not user-callable
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name // name of the user function
) ;
// GxB_UnaryOp_ztype is historical. Use GxB_UnaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_UnaryOp unaryop // unary operator
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
// GxB_UnaryOp_xtype is historical. Use GxB_UnaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_UnaryOp unaryop // unary operator
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
// Freeing a built-in unary operator is silently ignored; only user-created
// operators are freed.
GB_PUBLIC
GrB_Info GrB_UnaryOp_free // free a user-created unary operator
(
GrB_UnaryOp *unaryop // handle of unary operator to free
) ;
//==============================================================================
// GrB_BinaryOp: binary operators
//==============================================================================
// GrB_BinaryOp: a function z=f(x,y). The function f must have the signature:
// void f (void *z, const void *x, const void *y) ;
// The pointers are void * but they are always pointers to objects of type
// ztype, xtype, and ytype, respectively. See Demo/usercomplex.c for examples.
typedef struct GB_BinaryOp_opaque *GrB_BinaryOp ;
//------------------------------------------------------------------------------
// built-in binary operators, z = f(x,y), where x,y,z all have the same type
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// operators for all 13 types (including complex):
// GxB_PAIR_T and GrB_ONEB_T are identical; the latter was added to the
// v2.0 C API Specification.
// z = x z = y z = 1 z = pow (x,y)
GrB_FIRST_BOOL, GrB_SECOND_BOOL, GrB_ONEB_BOOL, GxB_POW_BOOL,
GrB_FIRST_INT8, GrB_SECOND_INT8, GrB_ONEB_INT8, GxB_POW_INT8,
GrB_FIRST_INT16, GrB_SECOND_INT16, GrB_ONEB_INT16, GxB_POW_INT16,
GrB_FIRST_INT32, GrB_SECOND_INT32, GrB_ONEB_INT32, GxB_POW_INT32,
GrB_FIRST_INT64, GrB_SECOND_INT64, GrB_ONEB_INT64, GxB_POW_INT64,
GrB_FIRST_UINT8, GrB_SECOND_UINT8, GrB_ONEB_UINT8, GxB_POW_UINT8,
GrB_FIRST_UINT16, GrB_SECOND_UINT16, GrB_ONEB_UINT16, GxB_POW_UINT16,
GrB_FIRST_UINT32, GrB_SECOND_UINT32, GrB_ONEB_UINT32, GxB_POW_UINT32,
GrB_FIRST_UINT64, GrB_SECOND_UINT64, GrB_ONEB_UINT64, GxB_POW_UINT64,
GrB_FIRST_FP32, GrB_SECOND_FP32, GrB_ONEB_FP32, GxB_POW_FP32,
GrB_FIRST_FP64, GrB_SECOND_FP64, GrB_ONEB_FP64, GxB_POW_FP64,
// complex:
GxB_FIRST_FC32, GxB_SECOND_FC32, GxB_ONEB_FC32, GxB_POW_FC32,
GxB_FIRST_FC64, GxB_SECOND_FC64, GxB_ONEB_FC64, GxB_POW_FC64,
// z = x+y z = x-y z = x*y z = x/y
GrB_PLUS_BOOL, GrB_MINUS_BOOL, GrB_TIMES_BOOL, GrB_DIV_BOOL,
GrB_PLUS_INT8, GrB_MINUS_INT8, GrB_TIMES_INT8, GrB_DIV_INT8,
GrB_PLUS_INT16, GrB_MINUS_INT16, GrB_TIMES_INT16, GrB_DIV_INT16,
GrB_PLUS_INT32, GrB_MINUS_INT32, GrB_TIMES_INT32, GrB_DIV_INT32,
GrB_PLUS_INT64, GrB_MINUS_INT64, GrB_TIMES_INT64, GrB_DIV_INT64,
GrB_PLUS_UINT8, GrB_MINUS_UINT8, GrB_TIMES_UINT8, GrB_DIV_UINT8,
GrB_PLUS_UINT16, GrB_MINUS_UINT16, GrB_TIMES_UINT16, GrB_DIV_UINT16,
GrB_PLUS_UINT32, GrB_MINUS_UINT32, GrB_TIMES_UINT32, GrB_DIV_UINT32,
GrB_PLUS_UINT64, GrB_MINUS_UINT64, GrB_TIMES_UINT64, GrB_DIV_UINT64,
GrB_PLUS_FP32, GrB_MINUS_FP32, GrB_TIMES_FP32, GrB_DIV_FP32,
GrB_PLUS_FP64, GrB_MINUS_FP64, GrB_TIMES_FP64, GrB_DIV_FP64,
// complex:
GxB_PLUS_FC32, GxB_MINUS_FC32, GxB_TIMES_FC32, GxB_DIV_FC32,
GxB_PLUS_FC64, GxB_MINUS_FC64, GxB_TIMES_FC64, GxB_DIV_FC64,
// z = y-x z = y/x z = 1 z = any(x,y)
GxB_RMINUS_BOOL, GxB_RDIV_BOOL, GxB_PAIR_BOOL, GxB_ANY_BOOL,
GxB_RMINUS_INT8, GxB_RDIV_INT8, GxB_PAIR_INT8, GxB_ANY_INT8,
GxB_RMINUS_INT16, GxB_RDIV_INT16, GxB_PAIR_INT16, GxB_ANY_INT16,
GxB_RMINUS_INT32, GxB_RDIV_INT32, GxB_PAIR_INT32, GxB_ANY_INT32,
GxB_RMINUS_INT64, GxB_RDIV_INT64, GxB_PAIR_INT64, GxB_ANY_INT64,
GxB_RMINUS_UINT8, GxB_RDIV_UINT8, GxB_PAIR_UINT8, GxB_ANY_UINT8,
GxB_RMINUS_UINT16, GxB_RDIV_UINT16, GxB_PAIR_UINT16, GxB_ANY_UINT16,
GxB_RMINUS_UINT32, GxB_RDIV_UINT32, GxB_PAIR_UINT32, GxB_ANY_UINT32,
GxB_RMINUS_UINT64, GxB_RDIV_UINT64, GxB_PAIR_UINT64, GxB_ANY_UINT64,
GxB_RMINUS_FP32, GxB_RDIV_FP32, GxB_PAIR_FP32, GxB_ANY_FP32,
GxB_RMINUS_FP64, GxB_RDIV_FP64, GxB_PAIR_FP64, GxB_ANY_FP64,
// complex:
GxB_RMINUS_FC32, GxB_RDIV_FC32, GxB_PAIR_FC32, GxB_ANY_FC32,
GxB_RMINUS_FC64, GxB_RDIV_FC64, GxB_PAIR_FC64, GxB_ANY_FC64,
// The GxB_IS* comparators z=f(x,y) return the same type as their
// inputs. Each of them compute z = (x OP y), where x, y, and z all have
// the same type. The value z is either 1 for true or 0 for false, but it
// is a value with the same type as x and y.
// z = (x == y) z = (x != y)
GxB_ISEQ_BOOL, GxB_ISNE_BOOL,
GxB_ISEQ_INT8, GxB_ISNE_INT8,
GxB_ISEQ_INT16, GxB_ISNE_INT16,
GxB_ISEQ_INT32, GxB_ISNE_INT32,
GxB_ISEQ_INT64, GxB_ISNE_INT64,
GxB_ISEQ_UINT8, GxB_ISNE_UINT8,
GxB_ISEQ_UINT16, GxB_ISNE_UINT16,
GxB_ISEQ_UINT32, GxB_ISNE_UINT32,
GxB_ISEQ_UINT64, GxB_ISNE_UINT64,
GxB_ISEQ_FP32, GxB_ISNE_FP32,
GxB_ISEQ_FP64, GxB_ISNE_FP64,
// complex:
GxB_ISEQ_FC32, GxB_ISNE_FC32,
GxB_ISEQ_FC64, GxB_ISNE_FC64,
// z = (x > y) z = (x < y) z = (x >= y) z = (x <= y)
GxB_ISGT_BOOL, GxB_ISLT_BOOL, GxB_ISGE_BOOL, GxB_ISLE_BOOL,
GxB_ISGT_INT8, GxB_ISLT_INT8, GxB_ISGE_INT8, GxB_ISLE_INT8,
GxB_ISGT_INT16, GxB_ISLT_INT16, GxB_ISGE_INT16, GxB_ISLE_INT16,
GxB_ISGT_INT32, GxB_ISLT_INT32, GxB_ISGE_INT32, GxB_ISLE_INT32,
GxB_ISGT_INT64, GxB_ISLT_INT64, GxB_ISGE_INT64, GxB_ISLE_INT64,
GxB_ISGT_UINT8, GxB_ISLT_UINT8, GxB_ISGE_UINT8, GxB_ISLE_UINT8,
GxB_ISGT_UINT16, GxB_ISLT_UINT16, GxB_ISGE_UINT16, GxB_ISLE_UINT16,
GxB_ISGT_UINT32, GxB_ISLT_UINT32, GxB_ISGE_UINT32, GxB_ISLE_UINT32,
GxB_ISGT_UINT64, GxB_ISLT_UINT64, GxB_ISGE_UINT64, GxB_ISLE_UINT64,
GxB_ISGT_FP32, GxB_ISLT_FP32, GxB_ISGE_FP32, GxB_ISLE_FP32,
GxB_ISGT_FP64, GxB_ISLT_FP64, GxB_ISGE_FP64, GxB_ISLE_FP64,
// z = min(x,y) z = max (x,y)
GrB_MIN_BOOL, GrB_MAX_BOOL,
GrB_MIN_INT8, GrB_MAX_INT8,
GrB_MIN_INT16, GrB_MAX_INT16,
GrB_MIN_INT32, GrB_MAX_INT32,
GrB_MIN_INT64, GrB_MAX_INT64,
GrB_MIN_UINT8, GrB_MAX_UINT8,
GrB_MIN_UINT16, GrB_MAX_UINT16,
GrB_MIN_UINT32, GrB_MAX_UINT32,
GrB_MIN_UINT64, GrB_MAX_UINT64,
GrB_MIN_FP32, GrB_MAX_FP32,
GrB_MIN_FP64, GrB_MAX_FP64,
// Binary operators for each of the 11 real types:
// The operators convert non-boolean types internally to boolean and return
// a value 1 or 0 in the same type, for true or false. Each computes z =
// ((x != 0) OP (y != 0)), where x, y, and z all have the same type.  These
// operators are useful as multiplicative operators when combined with
// non-boolean monoids of the same type.
// z = (x || y) z = (x && y) z = (x != y)
GxB_LOR_BOOL, GxB_LAND_BOOL, GxB_LXOR_BOOL,
GxB_LOR_INT8, GxB_LAND_INT8, GxB_LXOR_INT8,
GxB_LOR_INT16, GxB_LAND_INT16, GxB_LXOR_INT16,
GxB_LOR_INT32, GxB_LAND_INT32, GxB_LXOR_INT32,
GxB_LOR_INT64, GxB_LAND_INT64, GxB_LXOR_INT64,
GxB_LOR_UINT8, GxB_LAND_UINT8, GxB_LXOR_UINT8,
GxB_LOR_UINT16, GxB_LAND_UINT16, GxB_LXOR_UINT16,
GxB_LOR_UINT32, GxB_LAND_UINT32, GxB_LXOR_UINT32,
GxB_LOR_UINT64, GxB_LAND_UINT64, GxB_LXOR_UINT64,
GxB_LOR_FP32, GxB_LAND_FP32, GxB_LXOR_FP32,
GxB_LOR_FP64, GxB_LAND_FP64, GxB_LXOR_FP64,
// Binary operators that operate only on boolean types: LOR, LAND, LXOR,
// and LXNOR. The naming convention differs (_BOOL is not appended to the
// name). They are the same as GxB_LOR_BOOL, GxB_LAND_BOOL, and
// GxB_LXOR_BOOL, and GrB_EQ_BOOL, respectively.
// z = (x || y) z = (x && y) z = (x != y) z = (x == y)
GrB_LOR, GrB_LAND, GrB_LXOR, GrB_LXNOR,
// Operators for floating-point reals:
// z = atan2(x,y) z = hypot(x,y) z = fmod(x,y) z = remainder(x,y)
GxB_ATAN2_FP32, GxB_HYPOT_FP32, GxB_FMOD_FP32, GxB_REMAINDER_FP32,
GxB_ATAN2_FP64, GxB_HYPOT_FP64, GxB_FMOD_FP64, GxB_REMAINDER_FP64,
// z = ldexp(x,y) z = copysign (x,y)
GxB_LDEXP_FP32, GxB_COPYSIGN_FP32,
GxB_LDEXP_FP64, GxB_COPYSIGN_FP64,
// Bitwise operations on signed and unsigned integers: note that
// bitwise operations on signed integers can lead to different results,
// depending on your compiler; results are implementation-defined.
// z = (x | y) z = (x & y) z = (x ^ y) z = ~(x ^ y)
GrB_BOR_INT8, GrB_BAND_INT8, GrB_BXOR_INT8, GrB_BXNOR_INT8,
GrB_BOR_INT16, GrB_BAND_INT16, GrB_BXOR_INT16, GrB_BXNOR_INT16,
GrB_BOR_INT32, GrB_BAND_INT32, GrB_BXOR_INT32, GrB_BXNOR_INT32,
GrB_BOR_INT64, GrB_BAND_INT64, GrB_BXOR_INT64, GrB_BXNOR_INT64,
GrB_BOR_UINT8, GrB_BAND_UINT8, GrB_BXOR_UINT8, GrB_BXNOR_UINT8,
GrB_BOR_UINT16, GrB_BAND_UINT16, GrB_BXOR_UINT16, GrB_BXNOR_UINT16,
GrB_BOR_UINT32, GrB_BAND_UINT32, GrB_BXOR_UINT32, GrB_BXNOR_UINT32,
GrB_BOR_UINT64, GrB_BAND_UINT64, GrB_BXOR_UINT64, GrB_BXNOR_UINT64,
// z = bitget(x,y) z = bitset(x,y) z = bitclr(x,y)
GxB_BGET_INT8, GxB_BSET_INT8, GxB_BCLR_INT8,
GxB_BGET_INT16, GxB_BSET_INT16, GxB_BCLR_INT16,
GxB_BGET_INT32, GxB_BSET_INT32, GxB_BCLR_INT32,
GxB_BGET_INT64, GxB_BSET_INT64, GxB_BCLR_INT64,
GxB_BGET_UINT8, GxB_BSET_UINT8, GxB_BCLR_UINT8,
GxB_BGET_UINT16, GxB_BSET_UINT16, GxB_BCLR_UINT16,
GxB_BGET_UINT32, GxB_BSET_UINT32, GxB_BCLR_UINT32,
GxB_BGET_UINT64, GxB_BSET_UINT64, GxB_BCLR_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z and x have the same type, but y is GrB_INT8
//------------------------------------------------------------------------------
// z = bitshift (x,y) computes z = x left-shifted by y bits if y >= 0, or z
// = x right-shifted by (-y) bits if y < 0. z is equal to x if y is zero.
// z and x have the same type, as given by the suffix on the operator name.
// Since y must be signed, it cannot have the same type as x when x is
// unsigned; it is always GrB_INT8 for all 8 versions of this operator.
// The GxB_BSHIFT_* operators compute the arithmetic shift, and produce the
// same results as the bitshift.m function, for all possible inputs.
GB_PUBLIC GrB_BinaryOp
// z = bitshift(x,y)
// one operator for each of the 8 integer types (4 signed, 4 unsigned):
GxB_BSHIFT_INT8,
GxB_BSHIFT_INT16,
GxB_BSHIFT_INT32,
GxB_BSHIFT_INT64,
GxB_BSHIFT_UINT8,
GxB_BSHIFT_UINT16,
GxB_BSHIFT_UINT32,
GxB_BSHIFT_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is BOOL and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// Six comparators z=f(x,y) return their result as boolean, but
// where x and y have the same type. The suffix in their names refers to
// the type of x and y since z is always boolean. If used as multiply
// operators in a semiring, they can only be combined with boolean monoids.
// The _BOOL versions of these operators give the same results as their
// IS*_BOOL counterparts. GrB_EQ_BOOL and GrB_LXNOR are identical.
// z = (x == y) z = (x != y) z = (x > y) z = (x < y)
GrB_EQ_BOOL, GrB_NE_BOOL, GrB_GT_BOOL, GrB_LT_BOOL,
GrB_EQ_INT8, GrB_NE_INT8, GrB_GT_INT8, GrB_LT_INT8,
GrB_EQ_INT16, GrB_NE_INT16, GrB_GT_INT16, GrB_LT_INT16,
GrB_EQ_INT32, GrB_NE_INT32, GrB_GT_INT32, GrB_LT_INT32,
GrB_EQ_INT64, GrB_NE_INT64, GrB_GT_INT64, GrB_LT_INT64,
GrB_EQ_UINT8, GrB_NE_UINT8, GrB_GT_UINT8, GrB_LT_UINT8,
GrB_EQ_UINT16, GrB_NE_UINT16, GrB_GT_UINT16, GrB_LT_UINT16,
GrB_EQ_UINT32, GrB_NE_UINT32, GrB_GT_UINT32, GrB_LT_UINT32,
GrB_EQ_UINT64, GrB_NE_UINT64, GrB_GT_UINT64, GrB_LT_UINT64,
GrB_EQ_FP32, GrB_NE_FP32, GrB_GT_FP32, GrB_LT_FP32,
GrB_EQ_FP64, GrB_NE_FP64, GrB_GT_FP64, GrB_LT_FP64,
// complex: only EQ and NE are defined for the complex types; the ordering
// comparators (GT, LT, GE, LE) have no complex versions, since complex
// numbers are not ordered.
GxB_EQ_FC32, GxB_NE_FC32,
GxB_EQ_FC64, GxB_NE_FC64,
// z = (x >= y) z = (x <= y)
GrB_GE_BOOL, GrB_LE_BOOL,
GrB_GE_INT8, GrB_LE_INT8,
GrB_GE_INT16, GrB_LE_INT16,
GrB_GE_INT32, GrB_LE_INT32,
GrB_GE_INT64, GrB_LE_INT64,
GrB_GE_UINT8, GrB_LE_UINT8,
GrB_GE_UINT16, GrB_LE_UINT16,
GrB_GE_UINT32, GrB_LE_UINT32,
GrB_GE_UINT64, GrB_LE_UINT64,
GrB_GE_FP32, GrB_LE_FP32,
GrB_GE_FP64, GrB_LE_FP64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is complex and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// z = cmplx (x,y): constructs a complex number with real part x and
// imaginary part y
GxB_CMPLX_FP32,
GxB_CMPLX_FP64 ;
//==============================================================================
// positional GrB_UnaryOp and GrB_BinaryOp operators
//==============================================================================
// Positional operators do not depend on the value of an entry, but its row or
// column index in the matrix instead. For example, for an entry A(i,j),
// first_i(A(i,j),y) is equal to i. These operators are useful for returning
// node id's as the result of a semiring operation. If used as a mask, zero
// has a special value, and thus z=first_i1(A(i,j),j) returns i+1 instead of i.
// This can be useful when using a positional operator to construct a mask
// matrix or vector for another GraphBLAS operation. It is also essential for
// the @GrB interface, since the user view of matrix indices in @GrB is
// 1-based, not 0-based.
// When applied to a vector, j is always equal to 0. For a GxB_SCALAR,
// both i and j are always zero.
// GraphBLAS defines a GrB_Index as uint64_t, but these operators return a
// GrB_INT32 or GrB_INT64 type, which is more flexible to use because the
// result of this operator can be negated, to flag an entry for example. The
// value -1 can be used to denote "no node" or "no position". GrB_INT32 is
// useful for graphs smaller than 2^31 nodes. If the row or column index
// exceeds INT32_MAX, the result is determined by the typecast from the
// 64-bit index to the smaller 32-bit index.
// Positional operators cannot be used to construct monoids. They can be used
// as multiplicative operators in semirings, and as operators for GrB_eWise*,
// and GrB_apply (bind first or second). For the latter, the operator cannot
// depend on the bound scalar.
// When used as multiplicative operators in a semiring, FIRSTJ and SECONDI
// are identical. If C(i,j) += t is computed where t = A(i,k)*B(k,j), then
// t = k in both cases. Likewise, FIRSTJ1 and SECONDI1 are identical.
GB_PUBLIC GrB_BinaryOp
// positional binary operators; the result type (INT32 or INT64) is given by
// the suffix:
GxB_FIRSTI_INT32, GxB_FIRSTI_INT64, // z = first_i(A(i,j),y) == i
GxB_FIRSTI1_INT32, GxB_FIRSTI1_INT64, // z = first_i1(A(i,j),y) == i+1
GxB_FIRSTJ_INT32, GxB_FIRSTJ_INT64, // z = first_j(A(i,j),y) == j
GxB_FIRSTJ1_INT32, GxB_FIRSTJ1_INT64, // z = first_j1(A(i,j),y) == j+1
GxB_SECONDI_INT32, GxB_SECONDI_INT64, // z = second_i(x,B(i,j)) == i
GxB_SECONDI1_INT32, GxB_SECONDI1_INT64, // z = second_i1(x,B(i,j)) == i+1
GxB_SECONDJ_INT32, GxB_SECONDJ_INT64, // z = second_j(x,B(i,j)) == j
GxB_SECONDJ1_INT32, GxB_SECONDJ1_INT64 ; // z = second_j1(x,B(i,j)) == j+1
GB_PUBLIC GrB_UnaryOp
// positional unary operators; the result type (INT32 or INT64) is given by
// the suffix:
GxB_POSITIONI_INT32, GxB_POSITIONI_INT64, // z=position_i(A(i,j)) == i
GxB_POSITIONI1_INT32, GxB_POSITIONI1_INT64, // z=position_i1(A(i,j)) == i+1
GxB_POSITIONJ_INT32, GxB_POSITIONJ_INT64, // z=position_j(A(i,j)) == j
GxB_POSITIONJ1_INT32, GxB_POSITIONJ1_INT64 ;// z=position_j1(A(i,j)) == j+1
//==============================================================================
// special GrB_BinaryOp for build methods only
//==============================================================================
// In GrB*build* methods, passing dup as NULL means that no duplicates are
// tolerated. If duplicates appear, an error is returned. If dup is a binary
// operator, it is applied to reduce duplicates to a single value. The
// GxB_IGNORE_DUP is a special case. It is not an operator, but an indication
// that any duplicates are to be ignored.
// placeholder operator: valid only as the dup argument of the build methods
GB_PUBLIC GrB_BinaryOp GxB_IGNORE_DUP ;
//==============================================================================
// About boolean and bitwise binary operators
//==============================================================================
// Some of the boolean operators compute the same thing with different names.
// For example, x*y and x&&y give the same results for boolean x and y.
// Operations such as x < y when x and y are boolean are treated as if true=1
// and false=0. Below is the truth table for all binary operators with boolean
// inputs. This table is defined by how C typecasts boolean values for
// non-boolean operations. For example, if x, y, and z are boolean, x = true,
// and y = true, then z = x + y = true + true = true. DIV (x/y) is defined
// below. RDIV (y/x) is shown as \ in the table; it is the same as 2nd.
// x y 1st 2nd min max + - * / or and xor eq ne > < ge le \ pow pair
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 1
// 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 1 1 0 1
// 1 0 1 0 0 1 1 1 0 1 1 0 1 0 1 1 0 1 0 0 1 1
// 1 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 1
// GraphBLAS includes a GrB_DIV_BOOL operator in its specification, but does
// not define what boolean "division" means. SuiteSparse:GraphBLAS makes the
// following interpretation.
// GraphBLAS does not generate exceptions for divide-by-zero. Floating-point
// divide-by-zero follows the IEEE 754 standard: 1/0 is +Inf, -1/0 is -Inf, and
// 0/0 is NaN. For integer division by zero, if x is positive, x/0 is the
// largest integer, -x/0 is the integer minimum (zero for unsigned integers),
// and 0/0 is zero. For example, for int8, 1/0 is 127, and -1/0 is -128. For
// uint8, 1/0 is 255 and 0/0 is zero.
// Boolean division is treated as if it were an unsigned integer type with
// true=1 and false=0, and with the max and min value being 1 and 0. As a
// result, GrB_IDENTITY_BOOL, GrB_AINV_BOOL, and GrB_MINV_BOOL all give the
// same result (z = x).
// With this convention for boolean "division", there are 11 unique binary
// operators that are purely boolean. Other named *_BOOL operators are
// redundant but are included in GraphBLAS so that the name space of operators
// is complete. Below is a list of all operators and their equivalents.
// x: 0 0 1 1
// y: 0 1 0 1
// z: see below
//
// z = 0 0 0 0 0 (zero function, not predefined)
// z = (x && y) 0 0 0 1 AND, MIN, TIMES
// z = (x > y) 0 0 1 0 GT, ISGT, and set diff (x\y)
// z = x 0 0 1 1 FIRST, DIV
//
// z = (x < y) 0 1 0 0 LT, ISLT, and set diff (y\x)
// z = y 0 1 0 1 SECOND, RDIV
// z = (x != y) 0 1 1 0 XOR, MINUS, RMINUS, NE, ISNE
// z = (x || y) 0 1 1 1 OR, MAX, PLUS
//
// z = ~(x || y) 1 0 0 0 (nor(x,y) function, not predefined)
// z = (x == y) 1 0 0 1 LXNOR, EQ, ISEQ
// z = ~y 1 0 1 0 (not(y), not predefined)
// z = (x >= y) 1 0 1 1 GE, ISGE, POW, and "x implies y"
//
// z = ~x 1 1 0 0 (not(x), not predefined)
// z = (x <= y) 1 1 0 1 LE, ISLE, and "y implies x"
// z = ~(x && y) 1 1 1 0 (nand(x,y) function, not predefined)
// z = 1 1 1 1 1 PAIR, ONEB
//
// z = any(x,y) 0 . . 1 ANY (pick x or y arbitrarily)
// Four more that have no _BOOL suffix are also redundant with the operators
// of the form GxB_*_BOOL (GrB_LOR, GrB_LAND, GrB_LXOR, and GrB_LXNOR).
// Note that the boolean binary operator space is not complete. Five other
// boolean functions could be pre-defined as well: z = 0, nor(x,y),
// nand(x,y), not(x), and not(y).
// Four of the possible 16 bitwise operators are pre-defined: BOR, BAND,
// BXOR, and BXNOR. This assumes that the computations for each bit are
// entirely independent (so BSHIFT would not fit in the table above).
//------------------------------------------------------------------------------
// methods for binary operators
//------------------------------------------------------------------------------
// signature of a user-defined binary function z = f (x, y); all three values
// are passed as void pointers to values of the operator's ztype/xtype/ytype
typedef void (*GxB_binary_function) (void *, const void *, const void *) ;
// GrB_BinaryOp_new creates a user-defined binary op, with an automatic
// detection of the operator name.
#undef GrB_BinaryOp_new
#undef GrM_BinaryOp_new
GB_PUBLIC
GrB_Info GRB (BinaryOp_new)
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype // type of input y
) ;
// the macros capture the function name (via GB_STR) as the operator name:
#define GrB_BinaryOp_new(op,f,z,x,y) \
GxB_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_BinaryOp_new(op,f,z,x,y) \
GxM_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
// GxB_BinaryOp_new creates a named user-defined binary op.
GB_PUBLIC
GrB_Info GxB_BinaryOp_new
(
GrB_BinaryOp *op, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name, // name of the user function
const char *binop_defn // definition of the user function
) ;
// GB_BinaryOp_new is historical: use GxB_BinaryOp_new instead
GB_PUBLIC
GrB_Info GB_BinaryOp_new // not user-callable
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name // name of the user function
) ;
// NOTE: GxB_BinaryOp_ztype is historical. Use GxB_BinaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_xtype is historical. Use GxB_BinaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_ytype is historical. Use GxB_BinaryOp_ytype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype // return the type of y
(
GrB_Type *ytype, // return type of input y
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype_name // return the type_name of y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GrB_BinaryOp_free // free a user-created binary operator
(
GrB_BinaryOp *binaryop // handle of binary operator to free
) ;
//==============================================================================
// GxB_SelectOp: select operators (historical)
//==============================================================================
// GrB_IndexUnaryOp should be used instead of GxB_SelectOp.
// GxB_SelectOp is an operator used by GxB_select to select entries from an
// input matrix A that are kept in the output C. If an entry A(i,j) in the
// matrix A, of size nrows-by-ncols, has the value aij, then it calls the
// select function as result = f (i, j, aij, thunk). If the function returns
// true, the entry is kept in the output C. If f returns false, the entry is
// not kept in C. The type of x for the GxB_SelectOp operator may be any of
// the 11 built-in types, or any user-defined type. It may also be GrB_NULL,
// to indicate that the function is type-generic and does not depend at all on
// the value aij. In this case, x is passed to f as a NULL pointer.
// The optional Thunk parameter to GxB_select is a GrB_Scalar. For built-in
// select operators (TRIL, TRIU, DIAG, and OFFDIAG), Thunk must have any
// built-in type, and thunk = (int64_t) Thunk is used to specify the diagonal
// for these operators. Thunk may be NULL, in which case its value is treated
// as zero, if it has a built-in type. The value of Thunk (if present) is not
// modified by any built-in select operator.
// For user-defined select operators, Thunk is not typecasted at all. If
// the user operator is defined with a non-NULL Thunk input, then it must
// be non-NULL and of the same type, when calling GxB_select.
// GxB_SelectOp: a function z=f(i,j,x,thunk) for the GxB_Select operation.
// The function f must have the signature:
// bool f (GrB_Index i, GrB_Index j, const void *x, const void *thunk) ;
// The values of i and j are guaranteed to be in the range 0 to
// GrB_INDEX_MAX, and they can be safely typecasted to int64_t then negated,
// if desired, without any risk of integer overflow.
// opaque handle: the contents of a GxB_SelectOp are implementation-private
typedef struct GB_SelectOp_opaque *GxB_SelectOp ;
//------------------------------------------------------------------------------
// built-in select operators (historical)
//------------------------------------------------------------------------------
// GxB_select (C, Mask, accum, op, A, Thunk, desc) always returns a matrix C of
// the same size as A (or A' if GrB_TRAN is in the descriptor).
// historical: new code should use GrB_select with a GrB_IndexUnaryOp instead
GB_PUBLIC GxB_SelectOp
GxB_TRIL, // C=tril(A,thunk): returns true if ((j-i) <= thunk)
GxB_TRIU, // C=triu(A,thunk): returns true if ((j-i) >= thunk)
GxB_DIAG, // C=diag(A,thunk): returns true if ((j-i) == thunk)
GxB_OFFDIAG, // C=A-diag(A,thunk): returns true if ((j-i) != thunk)
GxB_NONZERO, // C=A(A ~= 0)
GxB_EQ_ZERO, // C=A(A == 0)
GxB_GT_ZERO, // C=A(A > 0)
GxB_GE_ZERO, // C=A(A >= 0)
GxB_LT_ZERO, // C=A(A < 0)
GxB_LE_ZERO, // C=A(A <= 0)
GxB_NE_THUNK, // C=A(A ~= thunk)
GxB_EQ_THUNK, // C=A(A == thunk)
GxB_GT_THUNK, // C=A(A > thunk)
GxB_GE_THUNK, // C=A(A >= thunk)
GxB_LT_THUNK, // C=A(A < thunk)
GxB_LE_THUNK ; // C=A(A <= thunk)
// For GxB_TRIL, GxB_TRIU, GxB_DIAG, and GxB_OFFDIAG, the parameter Thunk is a
// GrB_Scalar of any built-in type. If GrB_NULL, or empty, Thunk is treated as
// zero. Otherwise, the single entry is typecasted as (int64_t) Thunk.
// These select operators do not depend on the values of A, but just their
// position, and they work on matrices of any type.
// For GxB_*ZERO, the result depends only on the value of A(i,j). The Thunk
// parameter to GxB_select is ignored and may be GrB_NULL.
// The operators GxB_TRIL, GxB_TRIU, GxB_DIAG, GxB_OFFDIAG, GxB_NONZERO,
// GxB_EQ_ZERO, GxB_NE_THUNK, and GxB_EQ_THUNK work on all built-in types and
// all user-defined types.
// GxB_GT_*, GxB_GE_*, GxB_LT_*, and GxB_LE_* only work on the 11 built-in
// types (not complex). They cannot be used for user-defined types.
//------------------------------------------------------------------------------
// select operators: (historical)
//------------------------------------------------------------------------------
// User-defined GxB_SelectOps are historical. New code should use
// GrB_IndexUnaryOp_new instead.
typedef bool (*GxB_select_function) // return true if A(i,j) is kept
(
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j)
const void *x, // value of A(i,j)
const void *thunk // optional input for select function
) ;
#undef GxB_SelectOp_new
#undef GxM_SelectOp_new
GB_PUBLIC
GrB_Info GXB (SelectOp_new) // create a new user-defined select operator
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x, or NULL if type-generic
GrB_Type ttype // type of thunk, or NULL if not used
) ;
// the macros capture the function name (via GB_STR) as the operator name:
#define GxB_SelectOp_new(op,f,x,t) GB_SelectOp_new (op,f,x,t, GB_STR(f))
#define GxM_SelectOp_new(op,f,x,t) GM_SelectOp_new (op,f,x,t, GB_STR(f))
// GB_SelectOp_new should not be called directly, but only through the
// GxB_SelectOp_new macro (but use GrB_IndexUnaryOp_new instead).
GB_PUBLIC
GrB_Info GB_SelectOp_new // not user-callable
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x
GrB_Type ttype, // type of thunk, or NULL if not used
const char *name // name of the underlying function
) ;
// GxB_SelectOp_xtype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GxB_SelectOp selectop // select operator
) ;
// GxB_SelectOp_ttype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_ttype // return the type of thunk
(
GrB_Type *ttype, // return type of input thunk
GxB_SelectOp selectop // select operator
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_free // free a user-created select operator
(
GxB_SelectOp *selectop // handle of select operator to free
) ;
//==============================================================================
// GrB_IndexUnaryOp: a unary operator that depends on the row/col indices
//==============================================================================
// The indexop has the form z = f(aij, i, j, y) where aij is the numerical
// value of the A(i,j) entry, i and j are its row and column index, and y
// is a scalar. For vectors, it has the form z = f(vi, i, 0, y).
// opaque handle: the contents are implementation-private
typedef struct GB_IndexUnaryOp_opaque *GrB_IndexUnaryOp ;
typedef void (*GxB_index_unary_function)
(
void *z, // output value z, of type ztype
const void *x, // input value x of type xtype; value of v(i) or A(i,j)
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j), or zero for v(i)
const void *y // input scalar y
) ;
// GrB_IndexUnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_IndexUnaryOp_new
#undef GrM_IndexUnaryOp_new
GB_PUBLIC
GrB_Info GRB (IndexUnaryOp_new) // create a new user-defined IndexUnary op
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to IndexUnary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x (the A(i,j) entry)
GrB_Type ytype // type of input y (the scalar)
) ;
// the macros capture the function name (via GB_STR) as the operator name:
#define GrB_IndexUnaryOp_new(op,f,z,x,y) \
GxB_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_IndexUnaryOp_new(op,f,z,x,y) \
GxM_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_new // create a named user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to index_unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *idxop_name, // name of the user function
const char *idxop_defn // definition of the user function
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
// For TRIL, TRIU, DIAG, OFFDIAG, COLLE, COLGT, ROWLE, and ROWGT,
// the xtype_name is an empty string (""), since these functions do not depend
// on the type of the matrix input.
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ytype_name // return the type_name of the scalar y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
GB_PUBLIC
GrB_Info GrB_IndexUnaryOp_free // free a user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op // handle of IndexUnary to free
) ;
//------------------------------------------------------------------------------
// built-in IndexUnaryOps
//------------------------------------------------------------------------------
// To facilitate computations with negative integers, the indices i and j are
// of type int64_t. The scalar y has the type corresponding to the suffix
// of the name of the operator.
GB_PUBLIC GrB_IndexUnaryOp
//--------------------------------------------------------------------------
// Result has the integer type INT32 or INT64, the same as the suffix
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// ROWINDEX: (i+y): row index plus y
GrB_ROWINDEX_INT32, GrB_ROWINDEX_INT64,
// COLINDEX: (j+y): col index plus y
GrB_COLINDEX_INT32, GrB_COLINDEX_INT64,
// DIAGINDEX: (j-(i+y)): diagonal index plus y
GrB_DIAGINDEX_INT32, GrB_DIAGINDEX_INT64,
//--------------------------------------------------------------------------
// Result is bool, depending only on the indices i,j, and y
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// The scalar y has type int64 (GrB_INT64).
// TRIL: (j <= (i+y)): lower triangular part
GrB_TRIL,
// TRIU: (j >= (i+y)): upper triangular part
GrB_TRIU,
// DIAG: (j == (i+y)): diagonal
GrB_DIAG,
// OFFDIAG: (j != (i+y)): offdiagonal
GrB_OFFDIAG,
// COLLE: (j <= y): columns 0:y
GrB_COLLE,
// COLGT: (j > y): columns y+1:ncols-1
GrB_COLGT,
// ROWLE: (i <= y): rows 0:y
GrB_ROWLE,
// ROWGT: (i > y): rows y+1:nrows-1
GrB_ROWGT,
//--------------------------------------------------------------------------
// Result is bool, depending only on the value aij
//--------------------------------------------------------------------------
// These operators work on matrices and vectors of any built-in type,
// including complex types. aij and the scalar y have the same type as the
// operator suffix.
// VALUEEQ: (aij == y)
GrB_VALUEEQ_INT8, GrB_VALUEEQ_UINT8, GrB_VALUEEQ_FP32, GrB_VALUEEQ_BOOL,
GrB_VALUEEQ_INT16, GrB_VALUEEQ_UINT16, GrB_VALUEEQ_FP64,
GrB_VALUEEQ_INT32, GrB_VALUEEQ_UINT32, GxB_VALUEEQ_FC32,
GrB_VALUEEQ_INT64, GrB_VALUEEQ_UINT64, GxB_VALUEEQ_FC64,
// VALUENE: (aij != y)
GrB_VALUENE_INT8, GrB_VALUENE_UINT8, GrB_VALUENE_FP32, GrB_VALUENE_BOOL,
GrB_VALUENE_INT16, GrB_VALUENE_UINT16, GrB_VALUENE_FP64,
GrB_VALUENE_INT32, GrB_VALUENE_UINT32, GxB_VALUENE_FC32,
GrB_VALUENE_INT64, GrB_VALUENE_UINT64, GxB_VALUENE_FC64,
// These operators work on matrices and vectors of any real (non-complex)
// built-in type.
// VALUELT: (aij < y)
GrB_VALUELT_INT8, GrB_VALUELT_UINT8, GrB_VALUELT_FP32, GrB_VALUELT_BOOL,
GrB_VALUELT_INT16, GrB_VALUELT_UINT16, GrB_VALUELT_FP64,
GrB_VALUELT_INT32, GrB_VALUELT_UINT32,
GrB_VALUELT_INT64, GrB_VALUELT_UINT64,
// VALUELE: (aij <= y)
GrB_VALUELE_INT8, GrB_VALUELE_UINT8, GrB_VALUELE_FP32, GrB_VALUELE_BOOL,
GrB_VALUELE_INT16, GrB_VALUELE_UINT16, GrB_VALUELE_FP64,
GrB_VALUELE_INT32, GrB_VALUELE_UINT32,
GrB_VALUELE_INT64, GrB_VALUELE_UINT64,
// VALUEGT: (aij > y)
GrB_VALUEGT_INT8, GrB_VALUEGT_UINT8, GrB_VALUEGT_FP32, GrB_VALUEGT_BOOL,
GrB_VALUEGT_INT16, GrB_VALUEGT_UINT16, GrB_VALUEGT_FP64,
GrB_VALUEGT_INT32, GrB_VALUEGT_UINT32,
GrB_VALUEGT_INT64, GrB_VALUEGT_UINT64,
// VALUEGE: (aij >= y)
GrB_VALUEGE_INT8, GrB_VALUEGE_UINT8, GrB_VALUEGE_FP32, GrB_VALUEGE_BOOL,
GrB_VALUEGE_INT16, GrB_VALUEGE_UINT16, GrB_VALUEGE_FP64,
GrB_VALUEGE_INT32, GrB_VALUEGE_UINT32,
GrB_VALUEGE_INT64, GrB_VALUEGE_UINT64 ;
//==============================================================================
// GrB_Monoid
//==============================================================================
// A monoid is an associative operator z=op(x,y) where all three types of z, x,
// and y are identical. The monoid also has an identity element, such that
// op(x,identity) = op(identity,x) = x.
typedef struct GB_Monoid_opaque *GrB_Monoid ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UDT // create a monoid with a user-defined type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity // identity value of the monoid
) ;
// Type-generic method for creating a new monoid:
/*
GB_PUBLIC
GrB_Info GrB_Monoid_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity // identity value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GrB_Monoid_new_* method
// whose suffix matches the C type of the identity argument (GB_CASES expands
// to the type:function association list).
#define GrB_Monoid_new(monoid,op,identity) \
_Generic \
( \
(identity), \
GB_CASES (, GrB, Monoid_new) \
) \
(monoid, op, identity)
#endif
// GxB_Monoid_terminal_new is identical to GrB_Monoid_new, except that a
// terminal value can be specified. The terminal may be NULL, which indicates
// no terminal value (and in this case, it is identical to GrB_Monoid_new).
// The terminal value, if not NULL, must have the same type as the identity.
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity, // identity value of the monoid
bool terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity, // identity value of the monoid
int8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity, // identity value of the monoid
uint8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity, // identity value of the monoid
int16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity, // identity value of the monoid
uint16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity, // identity value of the monoid
int32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity, // identity value of the monoid
uint32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity, // identity value of the monoid
int64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity, // identity value of the monoid
uint64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity, // identity value of the monoid
float terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity, // identity value of the monoid
double terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity, // identity value of the monoid
GxB_FC32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity, // identity value of the monoid
GxB_FC64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UDT // create a monoid with a user type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity, // identity value of the monoid
void *terminal // terminal value of the monoid
) ;
// Type-generic method for creating a new monoid with a terminal value:
/*
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity, // identity value of the monoid
<type> terminal // terminal value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GxB_Monoid_terminal_new_*
// method whose suffix matches the C type of the identity argument.  The
// terminal argument is passed through unchanged.
#define GxB_Monoid_terminal_new(monoid,op,identity,terminal) \
_Generic \
( \
(identity), \
GB_CASES (, GxB, Monoid_terminal_new) \
) \
(monoid, op, identity, terminal)
#endif
GB_PUBLIC
GrB_Info GxB_Monoid_operator // return the monoid operator
(
GrB_BinaryOp *op, // returns the binary op of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_identity // return the monoid identity
(
void *identity, // returns the identity of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal // return the monoid terminal
(
bool *has_terminal, // true if the monoid has a terminal value
void *terminal, // returns the terminal of the monoid,
// unmodified if has_terminal is false
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_free // free a user-created monoid
(
GrB_Monoid *monoid // handle of monoid to free
) ;
//==============================================================================
// GrB_Semiring
//==============================================================================
typedef struct GB_Semiring_opaque *GrB_Semiring ;
GB_PUBLIC
GrB_Info GrB_Semiring_new // create a semiring
(
GrB_Semiring *semiring, // handle of semiring to create
GrB_Monoid add, // add monoid of the semiring
GrB_BinaryOp multiply // multiply operator of the semiring
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_add // return the add monoid of a semiring
(
GrB_Monoid *add, // returns add monoid of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_multiply // return multiply operator of a semiring
(
GrB_BinaryOp *multiply, // returns multiply operator of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GrB_Semiring_free // free a user-created semiring
(
GrB_Semiring *semiring // handle of semiring to free
) ;
//==============================================================================
// GrB_Scalar: a GraphBLAS scalar
//==============================================================================
// GxB_Scalar has become GrB_Scalar. The older name GxB_Scalar is kept as
// historical, but GrB_Scalar should be used instead.
typedef struct GB_Scalar_opaque *GxB_Scalar ; // historical: use GrB_Scalar
typedef struct GB_Scalar_opaque *GrB_Scalar ; // use this instead
// These methods create, free, copy, and clear a GrB_Scalar. The nvals
// and type methods return basic information about a GrB_Scalar.
GB_PUBLIC
GrB_Info GrB_Scalar_new // create a new GrB_Scalar with no entry
(
GrB_Scalar *s, // handle of GrB_Scalar to create
GrB_Type type // type of GrB_Scalar to create
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_dup // make an exact copy of a GrB_Scalar
(
GrB_Scalar *s, // handle of output GrB_Scalar to create
const GrB_Scalar t // input GrB_Scalar to copy
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_clear // clear a GrB_Scalar of its entry
( // type remains unchanged.
GrB_Scalar s // GrB_Scalar to clear
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_nvals // get the number of entries in a GrB_Scalar
(
GrB_Index *nvals, // GrB_Scalar has nvals entries (0 or 1)
const GrB_Scalar s // GrB_Scalar to query
) ;
// NOTE: GxB_Scalar_type is historical. Use GxB_Scalar_type_name instead.
GB_PUBLIC
GrB_Info GxB_Scalar_type // get the type of a GrB_Scalar
(
GrB_Type *type, // returns the type of the GrB_Scalar
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_type_name // return the name of the type of a scalar
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_memoryUsage // return # of bytes used for a scalar
(
size_t *size, // # of bytes used by the scalar s
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_free // free a GrB_Scalar
(
GrB_Scalar *s // handle of GrB_Scalar to free
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_new (GrB_Scalar *s, GrB_Type type) ;
GB_PUBLIC GrB_Info GxB_Scalar_dup (GrB_Scalar *s, const GrB_Scalar t) ;
GB_PUBLIC GrB_Info GxB_Scalar_clear (GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_nvals (GrB_Index *nvals, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_free (GrB_Scalar *s) ;
//------------------------------------------------------------------------------
// GrB_Scalar_setElement
//------------------------------------------------------------------------------
// Set a single GrB_Scalar s, from a user scalar x: s = x, typecasting from the
// type of x to the type of s as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_BOOL // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
bool x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
float x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
double x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UDT // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
void *x // user scalar to assign to s
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_setElement_BOOL (GrB_Scalar s, bool x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT8 (GrB_Scalar s, int8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT16 (GrB_Scalar s, int16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT32 (GrB_Scalar s, int32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT64 (GrB_Scalar s, int64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT8 (GrB_Scalar s, uint8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP32 (GrB_Scalar s, float x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP64 (GrB_Scalar s, double x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UDT (GrB_Scalar s, void *x) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_setElement // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
<type> x // user scalar to assign to s
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GrB_Scalar_setElement_*
// method whose suffix matches the C type of x.
#define GrB_Scalar_setElement(s,x) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Scalar_setElement) \
) \
(s, x)
// historical name: GxB_Scalar_setElement is identical to GrB_Scalar_setElement
#define GxB_Scalar_setElement(s,x) GrB_Scalar_setElement (s, x)
#endif
//------------------------------------------------------------------------------
// GrB_Scalar_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a GrB_Scalar, x = s, typecasting from the type
// of s to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_BOOL // x = s
(
bool *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT8 // x = s
(
int8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT8 // x = s
(
uint8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT16 // x = s
(
int16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT16 // x = s
(
uint16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT32 // x = s
(
int32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT32 // x = s
(
uint32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT64 // x = s
(
int64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT64 // x = s
(
uint64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP32 // x = s
(
float *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP64 // x = s
(
double *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC32 // x = s
(
GxB_FC32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC64 // x = s
(
GxB_FC64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UDT // x = s
(
void *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_BOOL (bool *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT8 (int8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT16 (int16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT32 (int32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT64 (int64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT8 (uint8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT16 (uint16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT32 (uint32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT64 (uint64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP32 (float *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP64 (double *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UDT (void *x, const GrB_Scalar s) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement // x = s
(
<type> *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GrB_Scalar_extractElement_*
// method whose suffix matches the pointed-to type of x (the '*' argument to
// GB_CASES generates pointer-type cases).
#define GrB_Scalar_extractElement(x,s) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Scalar_extractElement) \
) \
(x, s)
// historical name, identical to GrB_Scalar_extractElement:
#define GxB_Scalar_extractElement(x,s) GrB_Scalar_extractElement (x, s)
#endif
//==============================================================================
// GrB_Vector: a GraphBLAS vector
//==============================================================================
typedef struct GB_Vector_opaque *GrB_Vector ;
// These methods create, free, copy, and clear a vector. The size, nvals,
// and type methods return basic information about a vector.
GB_PUBLIC
GrB_Info GrB_Vector_new // create a new vector with no entries
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n // vector dimension is n-by-1
// (n must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Vector_dup // make an exact copy of a vector
(
GrB_Vector *w, // handle of output vector to create
const GrB_Vector u // input vector to copy
) ;
GB_PUBLIC
GrB_Info GrB_Vector_clear // clear a vector of all entries;
( // type and dimension remain unchanged.
GrB_Vector v // vector to clear
) ;
GB_PUBLIC
GrB_Info GrB_Vector_size // get the dimension of a vector
(
GrB_Index *n, // vector dimension is n-by-1
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_nvals // get the number of entries in a vector
(
GrB_Index *nvals, // vector has nvals entries
const GrB_Vector v // vector to query
) ;
// NOTE: GxB_Vector_type is historical. Use GxB_Vector_type_name instead.
GB_PUBLIC
GrB_Info GxB_Vector_type // get the type of a vector
(
GrB_Type *type, // returns the type of the vector
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_type_name // return the name of the type of a vector
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_memoryUsage // return # of bytes used for a vector
(
size_t *size, // # of bytes used by the vector v
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_iso // return iso status of a vector
(
bool *iso, // true if the vector is iso-valued
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_free // free a vector
(
GrB_Vector *v // handle of vector to free
) ;
//------------------------------------------------------------------------------
// GrB_Vector_build
//------------------------------------------------------------------------------
// GrB_Vector_build: w = sparse (I,1,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Vector_build_BOOL // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UDT // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_Scalar // build a vector from (i,scalar) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_build // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GrB_Vector_build_* method
// whose suffix matches the pointed-to type of X.  X is cast to
// (const void *) in the call so the selected method's prototype accepts it
// regardless of its concrete pointer type.
#define GrB_Vector_build(w,I,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_build) \
) \
(w, I, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_setElement
//------------------------------------------------------------------------------
// Set a single scalar in a vector, w(i) = x, typecasting from the type of x to
// the type of w as needed.
GB_PUBLIC
GrB_Info GrB_Vector_setElement_BOOL // w(i) = x
(
GrB_Vector w, // vector to modify
bool x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT8 // w(i) = x
(
GrB_Vector w, // vector to modify
int8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT8 // w(i) = x
(
GrB_Vector w, // vector to modify
uint8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT16 // w(i) = x
(
GrB_Vector w, // vector to modify
int16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT16 // w(i) = x
(
GrB_Vector w, // vector to modify
uint16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT32 // w(i) = x
(
GrB_Vector w, // vector to modify
int32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT32 // w(i) = x
(
GrB_Vector w, // vector to modify
uint32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT64 // w(i) = x
(
GrB_Vector w, // vector to modify
int64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT64 // w(i) = x
(
GrB_Vector w, // vector to modify
uint64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP32 // w(i) = x
(
GrB_Vector w, // vector to modify
float x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP64 // w(i) = x
(
GrB_Vector w, // vector to modify
double x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC32 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC64 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UDT // w(i) = x
(
GrB_Vector w, // vector to modify
void *x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_Scalar // w(i) = x
(
GrB_Vector w, // vector to modify
GrB_Scalar x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_setElement // w(i) = x
(
GrB_Vector w, // vector to modify
<type> x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper: C11 _Generic selects the GrB_Vector_setElement_*
// method whose suffix matches the C type of x; any other type (a GrB_Scalar)
// falls through to the default case, GrB_Vector_setElement_Scalar.
#define GrB_Vector_setElement(w,x,i) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Vector_setElement), \
default: GrB_Vector_setElement_Scalar \
) \
(w, x, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a vector, x = v(i), typecasting from the type of
// v to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_BOOL // x = v(i)
(
bool *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT8 // x = v(i)
(
int8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT8 // x = v(i)
(
uint8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT16 // x = v(i)
(
int16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT16 // x = v(i)
(
uint16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT32 // x = v(i)
(
int32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT32 // x = v(i)
(
uint32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT64 // x = v(i)
(
int64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT64 // x = v(i)
(
uint64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP32 // x = v(i)
(
float *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP64 // x = v(i)
(
double *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC32 // x = v(i)
(
GxB_FC32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC64 // x = v(i)
(
GxB_FC64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UDT // x = v(i)
(
void *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_Scalar // x = v(i)
(
GrB_Scalar x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractElement // x = v(i)
(
<type> *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the type of the scalar
// pointer x to the matching GrB_Vector_extractElement_* function; a
// GrB_Scalar argument (the default case) selects
// GrB_Vector_extractElement_Scalar.
#define GrB_Vector_extractElement(x,v,i) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Vector_extractElement), \
default: GrB_Vector_extractElement_Scalar \
) \
(x, v, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_removeElement
//------------------------------------------------------------------------------
// GrB_Vector_removeElement (v,i) removes the element v(i) from the vector v.
GB_PUBLIC
GrB_Info GrB_Vector_removeElement
(
GrB_Vector v, // vector to remove an element from
GrB_Index i // index
) ;
//------------------------------------------------------------------------------
// GrB_Vector_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a vector, like [I,~,X] = find (v). If
// any parameter I and/or X is NULL, then that component is not extracted. For
// example, to extract just the row indices, pass I as non-NULL, and X as NULL.
// This is like [I,~,~] = find (v).
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_BOOL // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UDT // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the pointer type of X to
// the matching GrB_Vector_extractTuples_* function.  There is no default
// case, so X must point to one of the built-in types; call
// GrB_Vector_extractTuples_UDT explicitly for user-defined types.
#define GrB_Vector_extractTuples(I,X,nvals,v) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_extractTuples) \
) \
(I, X, nvals, v)
#endif
//==============================================================================
// GrB_Matrix: a GraphBLAS matrix
//==============================================================================
typedef struct GB_Matrix_opaque *GrB_Matrix ;
// These methods create, free, copy, and clear a matrix. The nrows, ncols,
// nvals, and type methods return basic information about a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_new // create a new matrix with no entries
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // matrix dimension is nrows-by-ncols
GrB_Index ncols // (nrows and ncols must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_dup // make an exact copy of a matrix
(
GrB_Matrix *C, // handle of output matrix to create
const GrB_Matrix A // input matrix to copy
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_clear // clear a matrix of all entries;
( // type and dimensions remain unchanged
GrB_Matrix A // matrix to clear
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nrows // get the number of rows of a matrix
(
GrB_Index *nrows, // matrix has nrows rows
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_ncols // get the number of columns of a matrix
(
GrB_Index *ncols, // matrix has ncols columns
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nvals // get the number of entries in a matrix
(
GrB_Index *nvals, // matrix has nvals entries
const GrB_Matrix A // matrix to query
) ;
// NOTE: GxB_Matrix_type is historical. Use GxB_Matrix_type_name instead.
GB_PUBLIC
GrB_Info GxB_Matrix_type // get the type of a matrix
(
GrB_Type *type, // returns the type of the matrix
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_type_name // return the name of the type of a matrix
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_memoryUsage // return # of bytes used for a matrix
(
size_t *size, // # of bytes used by the matrix A
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_iso // return iso status of a matrix
(
bool *iso, // true if the matrix is iso-valued
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_free // free a matrix
(
GrB_Matrix *A // handle of matrix to free
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_build
//------------------------------------------------------------------------------
// GrB_Matrix_build: C = sparse (I,J,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Matrix_build_BOOL // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UDT // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_Scalar // build a matrix from (I,J,scalar) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_build // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the pointer type of X to
// the matching GrB_Matrix_build_* function.  X is passed through a
// (const void *) cast so that the selected _build_* variant accepts the
// argument regardless of its original pointer type.
#define GrB_Matrix_build(C,I,J,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_build) \
) \
(C, I, J, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_setElement
//------------------------------------------------------------------------------
// Set a single entry in a matrix, C(i,j) = x, typecasting
// from the type of x to the type of C, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_BOOL // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
bool x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
float x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
double x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UDT // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
void *x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_Scalar // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GrB_Scalar x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_setElement // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
<type> x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the type of the scalar x
// (passed by value, hence no '*' in GB_CASES) to the matching
// GrB_Matrix_setElement_* function; a GrB_Scalar x (the default case)
// selects GrB_Matrix_setElement_Scalar.
#define GrB_Matrix_setElement(C,x,i,j) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Matrix_setElement), \
default: GrB_Matrix_setElement_Scalar \
) \
(C, x, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a matrix, x = A(i,j), typecasting from the type
// of A to the type of x, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_BOOL // x = A(i,j)
(
bool *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT8 // x = A(i,j)
(
int8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT8 // x = A(i,j)
(
uint8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT16 // x = A(i,j)
(
int16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT16 // x = A(i,j)
(
uint16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT32 // x = A(i,j)
(
int32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT32 // x = A(i,j)
(
uint32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT64 // x = A(i,j)
(
int64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT64 // x = A(i,j)
(
uint64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP32 // x = A(i,j)
(
float *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP64 // x = A(i,j)
(
double *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC32 // x = A(i,j)
(
GxB_FC32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC64 // x = A(i,j)
(
GxB_FC64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UDT // x = A(i,j)
(
void *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_Scalar // x = A(i,j)
(
GrB_Scalar x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement // x = A(i,j)
(
<type> *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the pointer type of x to
// the matching GrB_Matrix_extractElement_* function; a GrB_Scalar x (the
// default case) selects GrB_Matrix_extractElement_Scalar.
#define GrB_Matrix_extractElement(x,A,i,j) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Matrix_extractElement), \
default: GrB_Matrix_extractElement_Scalar \
) \
(x, A, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement (C,i,j) removes the entry C(i,j) from the matrix C.
GB_PUBLIC
GrB_Info GrB_Matrix_removeElement
(
GrB_Matrix C, // matrix to remove entry from
GrB_Index i, // row index
GrB_Index j // column index
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a matrix, like [I,J,X] = find (A). If
// any parameter I, J and/or X is NULL, then that component is not extracted.
// For example, to extract just the row and col indices, pass I and J as
// non-NULL, and X as NULL. This is like [I,J,~] = find (A).
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_BOOL // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UDT // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro (C11 _Generic): dispatches on the pointer type of X to
// the matching GrB_Matrix_extractTuples_* function.  There is no default
// case, so X must point to one of the built-in types; call
// GrB_Matrix_extractTuples_UDT explicitly for user-defined types.
#define GrB_Matrix_extractTuples(I,J,X,nvals,A) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_extractTuples) \
) \
(I, J, X, nvals, A)
#endif
//------------------------------------------------------------------------------
// GxB_Matrix_concat and GxB_Matrix_split
//------------------------------------------------------------------------------
// GxB_Matrix_concat concatenates an array of matrices (Tiles) into a single
// GrB_Matrix C.
// Tiles is an m-by-n dense array of matrices held in row-major format, where
// Tiles [i*n+j] is the (i,j)th tile, and where m > 0 and n > 0 must hold. Let
// A{i,j} denote the (i,j)th tile. The matrix C is constructed by
// concatenating these tiles together, as:
// C = [ A{0,0} A{0,1} A{0,2} ... A{0,n-1}
// A{1,0} A{1,1} A{1,2} ... A{1,n-1}
// ...
// A{m-1,0} A{m-1,1} A{m-1,2} ... A{m-1,n-1} ]
// On input, the matrix C must already exist. Any existing entries in C are
// discarded. C must have dimensions nrows by ncols where nrows is the sum of
// # of rows in the matrices A{i,0} for all i, and ncols is the sum of the # of
// columns in the matrices A{0,j} for all j. All matrices in any given tile
// row i must have the same number of rows (that is, nrows(A{i,0}) must equal
// nrows(A{i,j}) for all j), and all matrices in any given tile column j must
// have the same number of columns (that is, ncols(A{0,j}) must equal
// ncols(A{i,j}) for all i).
// The type of C is unchanged, and all matrices A{i,j} are typecasted into the
// type of C. Any settings made to C by GxB_Matrix_Option_set (format by row
// or by column, bitmap switch, hyper switch, and sparsity control) are
// unchanged.
GB_PUBLIC
GrB_Info GxB_Matrix_concat // concatenate a 2D array of matrices
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const GrB_Descriptor desc // unused, except threading control
) ;
// GxB_Matrix_split does the opposite of GxB_Matrix_concat. It splits a single
// input matrix A into a 2D array of tiles. On input, the Tiles array must be
// a non-NULL pointer to a previously allocated array of size at least m*n
// where both m and n must be > 0. The Tiles_nrows array has size m, and
// Tiles_ncols has size n. The (i,j)th tile has dimension
// Tiles_nrows[i]-by-Tiles_ncols[j]. The sum of Tiles_nrows [0:m-1] must equal
// the number of rows of A, and the sum of Tiles_ncols [0:n-1] must equal the
// number of columns of A. The type of each tile is the same as the type of A;
// no typecasting is done.
// Split A into an m-by-n row-major array of tiles (the inverse of
// GxB_Matrix_concat); tile (i,j) is Tile_nrows[i]-by-Tile_ncols[j].
GB_PUBLIC
GrB_Info GxB_Matrix_split // split a matrix into 2D array of matrices
(
GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m, // # of rows in the Tiles array (must be > 0)
const GrB_Index n, // # of columns in the Tiles array (must be > 0)
const GrB_Index *Tile_nrows, // array of size m
const GrB_Index *Tile_ncols, // array of size n
const GrB_Matrix A, // input matrix to split
const GrB_Descriptor desc // unused, except threading control
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_diag, GxB_Vector_diag, GrB_Matrix_diag
//------------------------------------------------------------------------------
// GxB_Matrix_diag constructs a matrix from a vector. Let n be the length of
// the v vector, from GrB_Vector_size (&n, v). If k = 0, then C is an n-by-n
// diagonal matrix with the entries from v along the main diagonal of C, with
// C(i,i) = v(i). If k is nonzero, C is square with dimension n+abs(k). If k
// is positive, it denotes diagonals above the main diagonal, with C(i,i+k) =
// v(i). If k is negative, it denotes diagonals below the main diagonal of C,
// with C(i-k,i) = v(i).
// C must already exist on input, of the correct size. Any existing entries in
// C are discarded. The type of C is preserved, so that if the type of C and v
// differ, the entries are typecasted into the type of C. Any settings made to
// C by GxB_Matrix_Option_set (format by row or by column, bitmap switch, hyper
// switch, and sparsity control) are unchanged.
// Build C from v along diagonal k; C must already exist, square with
// dimension n+abs(k) where n is the length of v. Prior entries are discarded.
GB_PUBLIC
GrB_Info GxB_Matrix_diag // construct a diagonal matrix from a vector
(
GrB_Matrix C, // output matrix
const GrB_Vector v, // input vector
int64_t k, // diagonal index: 0 is main, >0 above, <0 below
const GrB_Descriptor desc // to specify # of threads
) ;
// GrB_Matrix_diag is identical to GxB_Matrix_diag (C, v, k, NULL),
// using the default # of threads from the global setting.
// Identical to GxB_Matrix_diag (C, v, k, NULL); uses the global # of threads.
GB_PUBLIC
GrB_Info GrB_Matrix_diag // construct a diagonal matrix from a vector
(
GrB_Matrix C, // output matrix
const GrB_Vector v, // input vector
int64_t k // diagonal index: 0 is main, >0 above, <0 below
) ;
// GxB_Vector_diag extracts a vector v from an input matrix A, which may be
// rectangular. If k = 0, the main diagonal of A is extracted; k > 0 denotes
// diagonals above the main diagonal of A, and k < 0 denotes diagonals below
// the main diagonal of A. Let A have dimension m-by-n. If k is in the range
// 0 to n-1, then v has length min(m,n-k). If k is negative and in the range
// -1 to -m+1, then v has length min(m+k,n). If k is outside these ranges,
// v has length 0 (this is not an error).
// v must already exist on input, of the correct length; that is
// GrB_Vector_size (&len,v) must return len = 0 if k >= n or k <= -m, len =
// min(m,n-k) if k is in the range 0 to n-1, and len = min(m+k,n) if k is in
// the range -1 to -m+1. Any existing entries in v are discarded. The type of
// v is preserved, so that if the type of A and v differ, the entries are
// typecasted into the type of v. Any settings made to v by
// GxB_Vector_Option_set (bitmap switch and sparsity control) are unchanged.
// Extract diagonal k of A into v; v must already exist with the exact length
// described above (a zero-length v for out-of-range k is not an error).
GB_PUBLIC
GrB_Info GxB_Vector_diag // extract a diagonal from a matrix, as a vector
(
GrB_Vector v, // output vector
const GrB_Matrix A, // input matrix
int64_t k, // diagonal index: 0 is main, >0 above, <0 below
const GrB_Descriptor desc // unused, except threading control
) ;
//==============================================================================
// SuiteSparse:GraphBLAS options
//==============================================================================
// The following options modify how SuiteSparse:GraphBLAS stores and operates
// on its matrices. The GxB_*Option* methods allow the user to suggest how the
// internal representation of a matrix, or all matrices, should be held. These
// options have no effect on the result (except for minor roundoff differences
// for floating-point types). They only affect the time and memory usage of the
// computations.
// GxB_Matrix_Option_set: sets an option for a specific matrix
// GxB_Matrix_Option_get: queries the current option of a specific matrix
// GxB_Vector_Option_set: sets an option for a specific vector
// GxB_Vector_Option_get: queries the current option of a specific vector
// GxB_Global_Option_set: sets an option for all future matrices
// GxB_Global_Option_get: queries current option for all future matrices
#define GxB_HYPER 0 // (historical, use GxB_HYPER_SWITCH)
// Option fields for the GxB_*_Option_get/set methods. NOTE(review): the
// values are non-sequential and appear fixed for backward compatibility —
// do not renumber.
typedef enum // for global options or matrix options
{
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set and GxB_Global_Option_get/set:
//------------------------------------------------------------
GxB_HYPER_SWITCH = 0, // defines switch to hypersparse (a double value)
GxB_BITMAP_SWITCH = 34, // defines switch to bitmap (a double value)
GxB_FORMAT = 1, // defines CSR/CSC format: GxB_BY_ROW or GxB_BY_COL
//------------------------------------------------------------
// for GxB_Global_Option_get only (read-only queries):
//------------------------------------------------------------
GxB_MODE = 2, // mode passed to GrB_init (blocking or non-blocking)
GxB_LIBRARY_NAME = 8, // name of the library (char *)
GxB_LIBRARY_VERSION = 9, // library version (3 int's)
GxB_LIBRARY_DATE = 10, // date of the library (char *)
GxB_LIBRARY_ABOUT = 11, // about the library (char *)
GxB_LIBRARY_URL = 12, // URL for the library (char *)
GxB_LIBRARY_LICENSE = 13, // license of the library (char *)
GxB_LIBRARY_COMPILE_DATE = 14, // date library was compiled (char *)
GxB_LIBRARY_COMPILE_TIME = 15, // time library was compiled (char *)
GxB_API_VERSION = 16, // API version (3 int's)
GxB_API_DATE = 17, // date of the API (char *)
GxB_API_ABOUT = 18, // about the API (char *)
GxB_API_URL = 19, // URL for the API (char *)
GxB_COMPILER_VERSION = 23, // compiler version (3 int's)
GxB_COMPILER_NAME = 24, // compiler name (char *)
//------------------------------------------------------------
// for GxB_Global_Option_get/set only:
//------------------------------------------------------------
GxB_GLOBAL_NTHREADS = GxB_NTHREADS, // max number of threads to use
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_GLOBAL_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
GxB_BURBLE = 99, // diagnostic output (bool *)
GxB_PRINTF = 101, // printf function diagnostic output
GxB_FLUSH = 102, // flush function diagnostic output
GxB_MEMORY_POOL = 103, // memory pool control
GxB_PRINT_1BASED = 104, // print matrices as 0-based or 1-based
//------------------------------------------------------------
// for GxB_Matrix_Option_get only (read-only query):
//------------------------------------------------------------
GxB_SPARSITY_STATUS = 33, // hyper, sparse, bitmap or full (1,2,4,8)
GxB_IS_HYPER = 6, // historical; use GxB_SPARSITY_STATUS
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set only:
//------------------------------------------------------------
GxB_SPARSITY_CONTROL = 32, // sparsity control: 0 to 15; see below
//------------------------------------------------------------
// GPU and options (DRAFT: do not use)
//------------------------------------------------------------
GxB_GLOBAL_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_GLOBAL_GPU_CHUNK = GxB_GPU_CHUNK,
} GxB_Option_Field ;
// GxB_FORMAT can be by row or by column:
typedef enum
{
GxB_BY_ROW = 0, // CSR: compressed sparse row format
GxB_BY_COL = 1, // CSC: compressed sparse column format
GxB_NO_FORMAT = -1 // format not defined
}
GxB_Format_Value ;
// The default format is by row. These constants are defined as GB_PUBLIC
// const, so that if SuiteSparse:GraphBLAS is recompiled with a different
// default format, and the application is relinked but not recompiled, it will
// acquire the new default values.
GB_PUBLIC const GxB_Format_Value GxB_FORMAT_DEFAULT ;
// the default hyper_switch parameter
GB_PUBLIC const double GxB_HYPER_DEFAULT ;
// GxB_SPARSITY_CONTROL can be any sum or bitwise OR of these 4 values
// (each is a distinct bit, so they combine without overlap):
#define GxB_HYPERSPARSE 1 // store matrix in hypersparse form
#define GxB_SPARSE 2 // store matrix as sparse form (compressed vector)
#define GxB_BITMAP 4 // store matrix as a bitmap
#define GxB_FULL 8 // store matrix as full; all entries must be present
// size of b array for GxB_set/get (GxB_BITMAP_SWITCH, b)
#define GxB_NBITMAP_SWITCH 8 // size of bitmap_switch parameter array
// any sparsity value:
#define GxB_ANY_SPARSITY (GxB_HYPERSPARSE + GxB_SPARSE + GxB_BITMAP + GxB_FULL)
// the default sparsity control is any format:
#define GxB_AUTO_SPARSITY GxB_ANY_SPARSITY
// GxB_Matrix_Option_set (A, GxB_SPARSITY_CONTROL, scontrol) provides hints
// about which data structure GraphBLAS should use for the matrix A:
//
// GxB_AUTO_SPARSITY: GraphBLAS selects automatically.
// GxB_HYPERSPARSE: always hypersparse, taking O(nvals(A)) space.
// GxB_SPARSE: always in a sparse structure: compressed-sparse row/column,
// taking O(nrows+nvals(A)) space if stored by row, or
// O(ncols+nvals(A)) if stored by column.
// GxB_BITMAP: always in a bitmap structure, taking O(nrows*ncols) space.
// GxB_FULL: always in a full structure, taking O(nrows*ncols) space,
// unless not all entries are present, in which case the bitmap
// storage is used.
//
// These options can be summed. For example, to allow a matrix to be sparse
// or hypersparse, but not bitmap or full, use GxB_SPARSE + GxB_HYPERSPARSE.
// Since GxB_FULL can only be used when all entries are present, matrices with
// just the GxB_FULL control setting are stored in bitmap form if any entries
// are not present.
//
// Only the least 4 bits of the sparsity control are considered, so the
// formats can be bitwise negated. For example, to allow for any format
// except full, use ~GxB_FULL.
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_STATUS, &sparsity) returns the
// current data structure currently used for the matrix A (either hypersparse,
// sparse, bitmap, or full).
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_CONTROL, &scontrol) returns the hint
// for how A should be stored (hypersparse, sparse, bitmap, or full, or any
// combination).
// GxB_HYPER_SWITCH:
// If the matrix or vector structure can be sparse or hypersparse, the
// GxB_HYPER_SWITCH parameter controls when each of these structures are
// used. The parameter is not used if the matrix or vector is full or
// bitmap.
//
// Let k be the actual number of non-empty vectors (with at least one
// entry). This value k is not dependent on whether or not the matrix is
// stored in hypersparse structure. Let n be the number of vectors (the #
// of columns if CSC, or rows if CSR). Let h be the value of the
// GxB_HYPER_SWITCH setting of the matrix.
//
// If a matrix is currently hypersparse, it can be converted to
// non-hypersparse if (n <= 1 || k > 2*n*h). Otherwise it stays
// hypersparse. If (n <= 1) the matrix is always stored as
// non-hypersparse.
//
// If currently non-hypersparse, it can be converted to hypersparse if (n
// > 1 && k <= n*h). Otherwise, it stays non-hypersparse. If (n <= 1)
// the matrix always remains non-hypersparse.
//
// Setting GxB_HYPER_SWITCH to GxB_ALWAYS_HYPER or GxB_NEVER_HYPER ensures
// a matrix always stays hypersparse, or always stays non-hypersparse,
// respectively.
// sentinel hyper_switch values: always / never convert to hypersparse
GB_PUBLIC const double GxB_ALWAYS_HYPER, GxB_NEVER_HYPER ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_set // set an option in a matrix
(
GrB_Matrix A, // matrix to modify
GxB_Option_Field field, // option to change
... // value to change it to (type depends on field)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix
(
GrB_Matrix A, // matrix to query
GxB_Option_Field field, // option to query
... // return value of the matrix option (a pointer; type depends on field)
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_set // set an option in a vector
(
GrB_Vector A, // vector to modify
GxB_Option_Field field, // option to change
... // value to change it to (type depends on field)
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_get // gets the current option of a vector
(
GrB_Vector A, // vector to query
GxB_Option_Field field, // option to query
... // return value of the vector option (a pointer; type depends on field)
) ;
// GxB_Global_Option_set controls the global defaults used when a new matrix is
// created. GrB_init defines the following initial settings:
//
// GxB_Global_Option_set (GxB_HYPER_SWITCH, GxB_HYPER_DEFAULT) ;
// GxB_Global_Option_set (GxB_BITMAP_SWITCH, NULL) ;
// GxB_Global_Option_set (GxB_FORMAT, GxB_FORMAT_DEFAULT) ;
//
// The compile-time constants GxB_HYPER_DEFAULT and GxB_FORMAT_DEFAULT are
// equal to 0.0625 and GxB_BY_ROW, by default. That is, by default, all new
// matrices are held by row in CSR format. If a matrix has fewer than n/16
// columns, it can be converted to hypersparse structure. If it has more than
// n/8 columns, it can be converted to a sparse structure. Modifying these
// global settings via GxB_Global_Option_set has no effect on matrices already
// created.
GB_PUBLIC
GrB_Info GxB_Global_Option_set // set a global default option
(
GxB_Option_Field field, // option to change
... // value to change it to (type depends on field)
) ;
GB_PUBLIC
GrB_Info GxB_Global_Option_get // gets the current global default option
(
GxB_Option_Field field, // option to query
... // return value of the global option (a pointer; type depends on field)
) ;
//------------------------------------------------------------------------------
// GxB_set and GxB_get
//------------------------------------------------------------------------------
// The simplest way to set/get a value of a GrB_Descriptor is with
// the generic GxB_set and GxB_get functions:
// GxB_set (desc, field, value) ;
// GxB_get (desc, field, &value) ;
// GxB_set and GxB_get are generic methods that set or query the options in
// a GrB_Matrix, a GrB_Descriptor, or in the global options. They can be used
// with the following syntax. Note that GxB_NTHREADS can be used for both the
// global nthreads_max, and for the # of threads in the descriptor.
// To set/get the global options:
//
// GxB_set (GxB_HYPER_SWITCH, double h) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GxB_HYPER_SWITCH, double *h) ;
//
// double b [GxB_NBITMAP_SWITCH] ;
// GxB_set (GxB_BITMAP_SWITCH, b) ;
// GxB_set (GxB_BITMAP_SWITCH, NULL) ; // set defaults
// GxB_get (GxB_BITMAP_SWITCH, b) ;
//
// GxB_set (GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GxB_NTHREADS, nthreads_max) ;
// GxB_get (GxB_NTHREADS, int *nthreads_max) ;
//
// GxB_set (GxB_CHUNK, double chunk) ;
// GxB_get (GxB_CHUNK, double *chunk) ;
//
// GxB_set (GxB_BURBLE, bool burble) ;
// GxB_get (GxB_BURBLE, bool *burble) ;
//
// GxB_set (GxB_PRINTF, void *printf_function) ;
// GxB_get (GxB_PRINTF, void **printf_function) ;
//
// GxB_set (GxB_FLUSH, void *flush_function) ;
// GxB_get (GxB_FLUSH, void **flush_function) ;
//
// int64_t free_pool_limit [64] ;
// GxB_set (GxB_MEMORY_POOL, free_pool_limit) ;
// GxB_set (GxB_MEMORY_POOL, NULL) ; // set defaults
// GxB_get (GxB_MEMORY_POOL, free_pool_limit) ;
// To get global options that can be queried but not modified:
//
// GxB_get (GxB_MODE, GrB_Mode *mode) ;
// To set/get a matrix option:
//
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, double h) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GrB_Matrix A, GxB_HYPER_SWITCH, double *h) ;
//
// GxB_set (GrB_Matrix A, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Matrix A, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Matrix A, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Matrix A, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Matrix A, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a vector option or status:
//
// GxB_set (GrB_Vector v, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Vector v, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Vector v, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Vector v, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Vector v, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a descriptor field:
//
// GxB_set (GrB_Descriptor d, GrB_OUTP, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_OUTP, GrB_REPLACE) ;
// GxB_get (GrB_Descriptor d, GrB_OUTP, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_MASK, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_STRUCTURE) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP + GrB_STRUCTURE) ;
// GxB_get (GrB_Descriptor d, GrB_MASK, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP0, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP0, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP0, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP1, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP1, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP1, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_GUSTAVSON) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_HASH) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_SAXPY) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_DOT) ;
// GxB_get (GrB_Descriptor d, GxB_AxB_METHOD, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_NTHREADS, nthreads) ;
// GxB_get (GrB_Descriptor d, GxB_NTHREADS, int *nthreads) ;
//
// GxB_set (GrB_Descriptor d, GxB_CHUNK, double chunk) ;
// GxB_get (GrB_Descriptor d, GxB_CHUNK, double *chunk) ;
//
// GxB_set (GrB_Descriptor d, GxB_SORT, int sort) ;
// GxB_get (GrB_Descriptor d, GxB_SORT, int *sort) ;
//
// GxB_set (GrB_Descriptor d, GxB_COMPRESSION, int method) ;
// GxB_get (GrB_Descriptor d, GxB_COMPRESSION, int *method) ;
//
// GxB_set (GrB_Descriptor d, GxB_IMPORT, int method) ;
// GxB_get (GrB_Descriptor d, GxB_IMPORT, int *method) ;
// GxB_set/GxB_get: type-generic wrappers (C11 _Generic) that dispatch on the
// type of the first argument to the Global, Vector, Matrix, or Descriptor
// Option set/get methods listed above.
#if GxB_STDC_VERSION >= 201112L
#define GxB_set(arg1,...) \
_Generic \
( \
(arg1), \
int : GxB_Global_Option_set , \
GxB_Option_Field : GxB_Global_Option_set , \
GrB_Vector : GxB_Vector_Option_set , \
GrB_Matrix : GxB_Matrix_Option_set , \
GrB_Descriptor : GxB_Desc_set \
) \
(arg1, __VA_ARGS__)
#define GxB_get(arg1,...) \
_Generic \
( \
(arg1), \
const int : GxB_Global_Option_get , \
int : GxB_Global_Option_get , \
const GxB_Option_Field : GxB_Global_Option_get , \
GxB_Option_Field : GxB_Global_Option_get , \
const GrB_Vector : GxB_Vector_Option_get , \
GrB_Vector : GxB_Vector_Option_get , \
const GrB_Matrix : GxB_Matrix_Option_get , \
GrB_Matrix : GxB_Matrix_Option_get , \
const GrB_Descriptor : GxB_Desc_get , \
GrB_Descriptor : GxB_Desc_get \
) \
(arg1, __VA_ARGS__)
#endif
//==============================================================================
// GrB_free: free any GraphBLAS object
//==============================================================================
// for null and invalid objects
// for null and invalid objects
#define GrB_NULL NULL
#define GrB_INVALID_HANDLE NULL
#if GxB_STDC_VERSION >= 201112L
// GrB_free (&object): type-generic free, dispatching on the handle's type
#define GrB_free(object) \
_Generic \
( \
(object), \
GrB_Type *: GrB_Type_free , \
GrB_UnaryOp *: GrB_UnaryOp_free , \
GrB_BinaryOp *: GrB_BinaryOp_free , \
GxB_SelectOp *: GxB_SelectOp_free , \
GrB_IndexUnaryOp *: GrB_IndexUnaryOp_free , \
GrB_Monoid *: GrB_Monoid_free , \
GrB_Semiring *: GrB_Semiring_free , \
GrB_Scalar *: GrB_Scalar_free , \
GrB_Vector *: GrB_Vector_free , \
GrB_Matrix *: GrB_Matrix_free , \
GrB_Descriptor *: GrB_Descriptor_free , \
GxB_Iterator *: GxB_Iterator_free \
) \
(object)
#endif
//==============================================================================
// GrB_wait: finish computations
//==============================================================================
// wait mode passed to the GrB_*_wait methods below:
typedef enum
{
GrB_COMPLETE = 0, // establishes a happens-before relation
GrB_MATERIALIZE = 1 // object is complete
}
GrB_WaitMode ;
// Finish all pending work in a specific object.
// (one method per object type; see the GrB_wait generic macro below)
GB_PUBLIC GrB_Info GrB_Type_wait (GrB_Type type , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_wait (GrB_UnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_wait (GrB_BinaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GxB_SelectOp_wait (GxB_SelectOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_wait (GrB_IndexUnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Monoid_wait (GrB_Monoid monoid , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Semiring_wait (GrB_Semiring semiring, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Descriptor_wait (GrB_Descriptor desc , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Scalar_wait (GrB_Scalar s , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Vector_wait (GrB_Vector v , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Matrix_wait (GrB_Matrix A , GrB_WaitMode waitmode) ;
// GrB_wait (object,waitmode) polymorphic function:
// dispatches on the object's type to the matching GrB_*_wait method above
#if GxB_STDC_VERSION >= 201112L
#define GrB_wait(object,waitmode) \
_Generic \
( \
(object), \
GrB_Type : GrB_Type_wait , \
GrB_UnaryOp : GrB_UnaryOp_wait , \
GrB_BinaryOp : GrB_BinaryOp_wait , \
GxB_SelectOp : GxB_SelectOp_wait , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_wait , \
GrB_Monoid : GrB_Monoid_wait , \
GrB_Semiring : GrB_Semiring_wait , \
GrB_Scalar : GrB_Scalar_wait , \
GrB_Vector : GrB_Vector_wait , \
GrB_Matrix : GrB_Matrix_wait , \
GrB_Descriptor : GrB_Descriptor_wait \
) \
(object, waitmode)
#endif
// NOTE: GxB_Scalar_wait is historical; use GrB_Scalar_wait instead
// (the historical signature takes a handle pointer and has no waitmode)
GB_PUBLIC GrB_Info GxB_Scalar_wait (GrB_Scalar *s) ;
//==============================================================================
// GrB_error: error handling
//==============================================================================
// Each GraphBLAS method and operation returns a GrB_Info error code.
// GrB_error returns additional information on the error in a thread-safe
// null-terminated string. The string returned by GrB_error is owned by
// the GraphBLAS library and must not be free'd.
GB_PUBLIC GrB_Info GrB_Type_error (const char **error, const GrB_Type type) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_error (const char **error, const GrB_UnaryOp op) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_error (const char **error, const GrB_BinaryOp op) ;
GB_PUBLIC GrB_Info GxB_SelectOp_error (const char **error, const GxB_SelectOp op) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_error (const char **error, const GrB_IndexUnaryOp op) ;
GB_PUBLIC GrB_Info GrB_Monoid_error (const char **error, const GrB_Monoid monoid) ;
GB_PUBLIC GrB_Info GrB_Semiring_error (const char **error, const GrB_Semiring semiring) ;
GB_PUBLIC GrB_Info GrB_Scalar_error (const char **error, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Vector_error (const char **error, const GrB_Vector v) ;
GB_PUBLIC GrB_Info GrB_Matrix_error (const char **error, const GrB_Matrix A) ;
GB_PUBLIC GrB_Info GrB_Descriptor_error (const char **error, const GrB_Descriptor d) ;
// GxB_Scalar_error is historical: use GrB_Scalar_error instead
GB_PUBLIC GrB_Info GxB_Scalar_error (const char **error, const GrB_Scalar s) ;
// GrB_error (error,object) polymorphic function:
// dispatches on the object's type to the matching GrB_*_error method above
#if GxB_STDC_VERSION >= 201112L
#define GrB_error(error,object) \
_Generic \
( \
(object), \
const GrB_Type : GrB_Type_error , \
GrB_Type : GrB_Type_error , \
const GrB_UnaryOp : GrB_UnaryOp_error , \
GrB_UnaryOp : GrB_UnaryOp_error , \
const GrB_BinaryOp : GrB_BinaryOp_error , \
GrB_BinaryOp : GrB_BinaryOp_error , \
const GxB_SelectOp : GxB_SelectOp_error , \
GxB_SelectOp : GxB_SelectOp_error , \
const GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
const GrB_Monoid : GrB_Monoid_error , \
GrB_Monoid : GrB_Monoid_error , \
const GrB_Semiring : GrB_Semiring_error , \
GrB_Semiring : GrB_Semiring_error , \
const GrB_Scalar : GrB_Scalar_error , \
GrB_Scalar : GrB_Scalar_error , \
const GrB_Vector : GrB_Vector_error , \
GrB_Vector : GrB_Vector_error , \
const GrB_Matrix : GrB_Matrix_error , \
GrB_Matrix : GrB_Matrix_error , \
const GrB_Descriptor : GrB_Descriptor_error , \
GrB_Descriptor : GrB_Descriptor_error \
) \
(error, object)
#endif
//==============================================================================
// GrB_mxm, vxm, mxv: matrix multiplication over a semiring
//==============================================================================
// matrix-matrix multiply over a semiring
GB_PUBLIC
GrB_Info GrB_mxm // C<Mask> = accum (C, A*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' and '*' for A*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// row-vector-matrix multiply over a semiring
GB_PUBLIC
GrB_Info GrB_vxm // w'<Mask> = accum (w, u'*A)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for u'*A
const GrB_Vector u, // first input: vector u
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
// matrix-column-vector multiply over a semiring
GB_PUBLIC
GrB_Info GrB_mxv // w<Mask> = accum (w, A*u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for A*u
const GrB_Matrix A, // first input: matrix A
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//==============================================================================
// GrB_eWiseMult: element-wise matrix and vector operations, set intersection
//==============================================================================
// GrB_eWiseMult computes C<Mask> = accum (C, A.*B), where ".*" is the Hadamard
// product, and where pairs of elements in two matrices (or vectors) are
// pairwise "multiplied" with C(i,j) = mult (A(i,j),B(i,j)).
// vector variants: the "multiply" operator is taken from a semiring, a
// monoid, or a binary operator, respectively
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Semiring // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Monoid // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_BinaryOp // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp mult, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
// matrix variants: the "multiply" operator is taken from a semiring, a
// monoid, or a binary operator, respectively
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Semiring // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Monoid // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_BinaryOp // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp mult, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// All 6 of the above type-specific functions are captured in a single
// type-generic function, GrB_eWiseMult, dispatching on the types of the
// output (C) and of the operator (op):
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseMult(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_eWiseAdd: element-wise matrix and vector operations, set union
//==============================================================================
// GrB_eWiseAdd computes C<Mask> = accum (C, A+B), where pairs of elements in
// two matrices (or two vectors) are pairwise "added".
// vector variants: the "add" operator is taken from a semiring, a monoid,
// or a binary operator, respectively
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Semiring // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Monoid // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_BinaryOp // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
// matrix variants: the "add" operator is taken from a semiring, a monoid,
// or a binary operator, respectively
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Semiring // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Monoid // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_BinaryOp // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseAdd(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GxB_eWiseUnion: a variant of GrB_eWiseAdd
//==============================================================================
// GxB_eWiseUnion is a variant of eWiseAdd. They differ when an entry is
// present in A but not B, or in B but not A.
// eWiseAdd does the following, for a matrix, where "+" is the add binary op:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j)
// else if B(i,j) is present but not A(i,j)
// C(i,j) = B(i,j)
// by contrast, eWiseUnion always applies the operator:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j) + beta
// else if B(i,j) is present but not A(i,j)
// C(i,j) = alpha + B(i,j)
GB_PUBLIC
GrB_Info GxB_Vector_eWiseUnion // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Scalar alpha,
const GrB_Vector v, // second input: vector v
const GrB_Scalar beta,
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_eWiseUnion      // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar alpha, // value paired with B(i,j) when A(i,j) is not present
const GrB_Matrix B, // second input: matrix B
const GrB_Scalar beta, // value paired with A(i,j) when B(i,j) is not present
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
#if GxB_STDC_VERSION >= 201112L
#define GxB_eWiseUnion(C,Mask,accum,op,A,alpha,B,beta,desc) \
_Generic \
( \
(C), \
const GrB_Matrix : GxB_Matrix_eWiseUnion , \
GrB_Matrix : GxB_Matrix_eWiseUnion , \
const GrB_Vector : GxB_Vector_eWiseUnion , \
GrB_Vector : GxB_Vector_eWiseUnion \
) \
(C, Mask, accum, op, A, alpha, B, beta, desc)
#endif
//==============================================================================
// GrB_extract: extract a submatrix or subvector
//==============================================================================
// Extract entries from a matrix or vector; T = A(I,J). This (like most
// GraphBLAS methods) is then followed by C<Mask>=accum(C,T).
// To extract all rows of a matrix or vector, as in A (:,J), use I=GrB_ALL as
// the input argument. For all columns of a matrix, use J=GrB_ALL.
GB_PUBLIC const uint64_t *GrB_ALL ;
// To extract a range of rows and columns, I and J can be a list of 2 or 3
// indices that defines a range (begin:end) or a strided range (begin:inc:end).
// To specify the colon syntax I = begin:end, the array I has size at least 2,
// where I [GxB_BEGIN] = begin and I [GxB_END] = end. The parameter ni is then
// passed as the special value GxB_RANGE. To specify the colon syntax I =
// begin:inc:end, the array I has size at least three, with the values begin,
// end, and inc (in that order), and then pass in the value ni = GxB_STRIDE.
// The same can be done for the list J and its size, nj.
// These special values of ni and nj can be used for GrB_assign,
// GrB_extract, and GxB_subassign.
#define GxB_RANGE (INT64_MAX)
#define GxB_STRIDE (INT64_MAX-1)
#define GxB_BACKWARDS (INT64_MAX-2)
// for the strided range begin:inc:end, I [GxB_BEGIN] is the value of begin, I
// [GxB_END] is the value end, I [GxB_INC] is the magnitude of the stride. If
// the stride is negative, use ni = GxB_BACKWARDS.
#define GxB_BEGIN (0)
#define GxB_END (1)
#define GxB_INC (2)
// For example, the notation 10:-2:1 defines a sequence [10 8 6 4 2].
// The end point of the sequence (1) need not appear in the sequence, if
// the last increment goes past it. To specify the same in GraphBLAS,
// use:
// GrB_Index I [3], ni = GxB_BACKWARDS ;
// I [GxB_BEGIN ] = 10 ; // the start of the sequence
// I [GxB_INC ] = 2 ; // the magnitude of the increment
// I [GxB_END ] = 1 ; // the end of the sequence
GB_PUBLIC
GrB_Info GrB_Vector_extract // w<mask> = accum (w, u(I))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extract // C<Mask> = accum (C, A(I,J))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_extract // w<mask> = accum (w, A(I,j))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_extract: generic matrix/vector extraction
//------------------------------------------------------------------------------
// GrB_extract is a generic interface to the following functions:
// GrB_Vector_extract (w,mask,acc,u,I,ni,d) // w<m> = acc (w, u(I))
// GrB_Col_extract (w,mask,acc,A,I,ni,j,d) // w<m> = acc (w, A(I,j))
// GrB_Matrix_extract (C,Mask,acc,A,I,ni,J,nj,d) // C<Mask> = acc (C, A(I,J))
#if GxB_STDC_VERSION >= 201112L
#define GrB_extract(arg1,Mask,accum,arg4,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
const GrB_Vector : GrB_Vector_extract , \
GrB_Vector : GrB_Vector_extract , \
const GrB_Matrix : GrB_Col_extract , \
GrB_Matrix : GrB_Col_extract \
), \
GrB_Matrix : GrB_Matrix_extract \
) \
(arg1, Mask, accum, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GxB_subassign: matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each GxB_subassign function is very similar to its corresponding GrB_assign
// function in the spec, but they differ in two ways: (1) the mask in
// GxB_subassign has the same size as w(I) for vectors and C(I,J) for matrices,
// and (2) they differ in the GrB_REPLACE option. See the user guide for
// details.
// In GraphBLAS notation, the two methods can be described as follows:
// matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
// matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
// --- assign ------------------------------------------------------------------
//
// GrB_Matrix_assign C<M>(I,J) += A M same size as matrix C.
// A is |I|-by-|J|
//
// GrB_Vector_assign w<m>(I) += u m same size as column vector w.
// u is |I|-by-1
//
// GrB_Row_assign C<m'>(i,J) += u' m is a column vector the same
// size as a row of C.
// u is |J|-by-1, i is a scalar.
//
// GrB_Col_assign C<m>(I,j) += u m is a column vector the same
// size as a column of C.
// u is |I|-by-1, j is a scalar.
//
// --- subassign ---------------------------------------------------------------
//
// GxB_Matrix_subassign C(I,J)<M> += A M same size as matrix A.
// A is |I|-by-|J|
//
// GxB_Vector_subassign w(I)<m> += u m same size as column vector u.
// u is |I|-by-1
//
// GxB_Row_subassign C(i,J)<m'> += u' m same size as column vector u.
// u is |J|-by-1, i is a scalar.
//
// GxB_Col_subassign C(I,j)<m> += u m same size as column vector u.
// u is |I|-by-1, j is a scalar.
GB_PUBLIC
GrB_Info GxB_Vector_subassign // w(I)<mask> = accum (w(I),u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign // C(I,J)<Mask> = accum (C(I,J),A)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J), Mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Col_subassign // C(I,j)<mask> = accum (C(I,j),u)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(I,j), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t)
const GrB_Vector u, // input vector
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for C(I,j) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Row_subassign // C(i,J)<mask'> = accum (C(i,J),u')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(i,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t)
const GrB_Vector u, // input vector
GrB_Index i, // row index
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(i,J) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Vector_subassign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w(I)<mask> = accum(w(I),x). The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w(I)<mask> = accum(w(I),u) is done.
GB_PUBLIC
GrB_Info GxB_Vector_subassign_BOOL // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x)
bool x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
float x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
double x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UDT // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
void *x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_Scalar // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GrB_Scalar x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_subassign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C(I,J)<Mask> = accum(C(I,J),x). The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C(I,J)<Mask> = accum(C(I,J),A) is done.
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_BOOL // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
bool x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
float x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
double x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UDT // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
void *x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_Scalar // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GrB_Scalar x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
//------------------------------------------------------------------------------
// GxB_subassign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GxB_subassign is a generic function that provides access to all specific
// GxB_*_subassign* functions:
// GxB_Vector_subassign (w,m,acc,u,I,ni,d) // w(I)<m> = acc(w(I),u)
// GxB_Matrix_subassign (C,M,acc,A,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),A)
// GxB_Col_subassign (C,m,acc,u,I,ni,j,d) // C(I,j)<m> = acc(C(I,j),u)
// GxB_Row_subassign (C,m,acc,u,i,J,nj,d) // C(i,J)<m'> = acc(C(i,J),u')
// GxB_Vector_subassign_T (w,m,acc,x,I,ni,d) // w(I)<m> = acc(w(I),x)
// GxB_Matrix_subassign_T (C,M,acc,x,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),x)
#if GxB_STDC_VERSION >= 201112L
#define GxB_subassign(arg1,Mask,accum,arg4,arg5,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
GB_CASES (, GxB, Vector_subassign) , \
const GrB_Scalar : GxB_Vector_subassign_Scalar, \
GrB_Scalar : GxB_Vector_subassign_Scalar, \
default: GxB_Vector_subassign \
), \
default: \
_Generic \
( \
(arg4), \
GB_CASES (, GxB, Matrix_subassign) , \
const GrB_Scalar : GxB_Matrix_subassign_Scalar, \
GrB_Scalar : GxB_Matrix_subassign_Scalar, \
const GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GxB_Col_subassign , \
GrB_Index *: GxB_Col_subassign , \
default: GxB_Row_subassign \
), \
GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GxB_Col_subassign , \
GrB_Index *: GxB_Col_subassign , \
default: GxB_Row_subassign \
), \
default: GxB_Matrix_subassign \
) \
) \
(arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_assign: matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each of these can be used with their generic name, GrB_assign.
GB_PUBLIC
GrB_Info GrB_Vector_assign // w<mask>(I) = accum (w(I),u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign // C<Mask>(I,J) = accum (C(I,J),A)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_assign // C<mask>(I,j) = accum (C(I,j),u)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(:,j), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t)
const GrB_Vector u, // input vector
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for C(:,j) and mask
) ;
GB_PUBLIC
GrB_Info GrB_Row_assign // C<mask'>(i,J) = accum (C(i,J),u')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(i,:), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t)
const GrB_Vector u, // input vector
GrB_Index i, // row index
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(i,:) and mask
) ;
//------------------------------------------------------------------------------
// GrB_Vector_assign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w<mask>(I) = accum(w(I),x). The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w<mask>(I) = accum(w(I),u) is done.
GB_PUBLIC
GrB_Info GrB_Vector_assign_BOOL // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x)
bool x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT8 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT8 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT16 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT16 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
float x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
double x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UDT // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
void *x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_Scalar // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GrB_Scalar x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_assign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C<Mask>(I,J) = accum(C(I,J),x). The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C<Mask>(I,J) = accum(C(I,J),A) is done.
GB_PUBLIC
GrB_Info GrB_Matrix_assign_BOOL // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
bool x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT8 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT8 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT16 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT16 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
float x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
double x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UDT // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
void *x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_Scalar // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GrB_Scalar x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
//------------------------------------------------------------------------------
// GrB_assign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GrB_assign is a generic function that provides access to all specific
// GrB_*_assign* functions:
// GrB_Vector_assign_T (w,m,acc,x,I,ni,d) // w<m>(I) = acc(w(I),x)
// GrB_Vector_assign (w,m,acc,u,I,ni,d) // w<m>(I) = acc(w(I),u)
// GrB_Matrix_assign_T (C,M,acc,x,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),x)
// GrB_Col_assign (C,m,acc,u,I,ni,j,d) // C<m>(I,j) = acc(C(I,j),u)
// GrB_Row_assign (C,m,acc,u,i,J,nj,d) // C<m'>(i,J) = acc(C(i,J),u')
// GrB_Matrix_assign (C,M,acc,A,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),A)
// The specific function is selected at compile time via C11 _Generic (hence
// the GxB_STDC_VERSION guard below): the type of the first argument picks the
// vector or matrix family, and the types of the fourth and fifth arguments
// (scalar, vector, matrix, or GrB_Index list) pick the exact variant.
#if GxB_STDC_VERSION >= 201112L
#define GrB_assign(arg1,Mask,accum,arg4,arg5,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
GB_CASES (, GrB, Vector_assign) , \
const GrB_Scalar : GrB_Vector_assign_Scalar , \
GrB_Scalar : GrB_Vector_assign_Scalar , \
default: GrB_Vector_assign \
), \
default: \
_Generic \
( \
(arg4), \
GB_CASES (, GrB, Matrix_assign) , \
const GrB_Scalar : GrB_Matrix_assign_Scalar , \
GrB_Scalar : GrB_Matrix_assign_Scalar , \
const GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GrB_Col_assign , \
GrB_Index *: GrB_Col_assign , \
default: GrB_Row_assign \
), \
GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GrB_Col_assign , \
GrB_Index *: GrB_Col_assign , \
default: GrB_Row_assign \
), \
default: GrB_Matrix_assign \
) \
) \
(arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_apply: matrix and vector apply
//==============================================================================
// Apply a unary, index_unary, or binary operator to entries in a matrix or
// vector, C<M> = accum (C, op (A)).
GB_PUBLIC
GrB_Info GrB_Vector_apply // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_UnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply // C<Mask> = accum (C, op(A)) or op(A')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_UnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A (op(A') presumably when desc requests the transpose of A -- confirm)
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the first
// input to a scalar x, w<mask> = accum (w, op (x,u)).
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_Scalar // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp1st_Scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_BOOL // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
bool x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT8 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int8_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT16 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int16_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT8 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint8_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT16 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint16_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
float x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
double x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UDT // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const void *x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the second
// input to a scalar y, w<mask> = accum (w, op (u,y)).
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_Scalar // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp2nd_Scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_BOOL // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UDT // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a vector
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the first input
// to a scalar x, C<Mask> = accum (C, op (x,A)), or op(x,A').
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_Scalar // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp1st_Scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_BOOL // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
bool x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
float x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
double x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UDT // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const void *x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the second input
// to a scalar y, C<Mask> = accum (C, op (A,y)), or op(A',y).
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_Scalar // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp2nd_Scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_BOOL // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UDT // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_apply: generic matrix/vector apply
//------------------------------------------------------------------------------
// GrB_apply is a generic function for applying a unary operator, a binary
// operator with one input bound to a scalar, or an index-unary operator to a
// matrix or vector, and provides access to these functions:
// GrB_Vector_apply (w,mask,acc,op,u,d) // w<mask> = accum (w, op(u))
// GrB_Matrix_apply (C,Mask,acc,op,A,d) // C<Mask> = accum (C, op(A))
// GrB_Vector_apply (w,m,acc,unop ,u,d)
// GrB_Vector_apply_BinaryOp1st_TYPE (w,m,acc,binop,x,u,d)
// GrB_Vector_apply_BinaryOp2nd_TYPE (w,m,acc,binop,u,y,d)
// GrB_Vector_apply_IndexOp_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_apply (C,M,acc,unop ,A,d)
// GrB_Matrix_apply_BinaryOp1st_TYPE (C,M,acc,binop,x,A,d)
// GrB_Matrix_apply_BinaryOp2nd_TYPE (C,M,acc,binop,A,y,d)
// GrB_Matrix_apply_IndexOp_TYPE (C,M,acc,idxop,A,y,d)
#if GxB_STDC_VERSION >= 201112L
// GB_BIND (kind, x, y, ...): select the bound-scalar binary-op apply function
// for a Vector or Matrix.  If the first variadic argument x is a GrB_Scalar
// or a built-in C scalar type, the scalar is bound to the first operator
// input (the *_apply_BinaryOp1st_* family); otherwise the C type of y selects
// the *_apply_BinaryOp2nd_* variant, defaulting to the GrB_Scalar form.
#define GB_BIND(kind,x,y,...) \
_Generic \
( \
(x), \
const GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
GB_CASES (, GrB, GB_CONCAT ( kind, _apply_BinaryOp1st,, )) , \
default: \
_Generic \
( \
(y), \
GB_CASES (, GrB, GB_CONCAT ( kind , _apply_BinaryOp2nd,, )), \
default: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp2nd_Scalar) \
) \
)
// GB_IDXOP (kind, A, y, ...): select the *_apply_IndexOp_* function for a
// Vector or Matrix from the C type of the scalar y, defaulting to the
// GrB_Scalar form when y is not a built-in C scalar type.
#define GB_IDXOP(kind,A,y,...) \
_Generic \
( \
(y), \
GB_CASES (, GrB, GB_CONCAT ( kind, _apply_IndexOp,, )), \
default: GB_CONCAT ( GrB, _, kind, _apply_IndexOp_Scalar) \
)
// GrB_apply: polymorphic apply.  The outer _Generic dispatches on C (vector
// vs matrix); the inner _Generic dispatches on the operator kind (unary,
// binary, or index-unary), delegating the scalar-typed variants to GB_BIND
// and GB_IDXOP above.  The selected function is then called with the
// original argument list.
#define GrB_apply(C,Mask,accum,op,...) \
_Generic \
( \
(C), \
GrB_Vector : \
_Generic \
( \
(op), \
GrB_UnaryOp : GrB_Vector_apply , \
GrB_BinaryOp : GB_BIND (Vector, __VA_ARGS__), \
GrB_IndexUnaryOp : GB_IDXOP (Vector, __VA_ARGS__) \
), \
GrB_Matrix : \
_Generic \
( \
(op), \
GrB_UnaryOp : GrB_Matrix_apply , \
GrB_BinaryOp : GB_BIND (Matrix, __VA_ARGS__), \
GrB_IndexUnaryOp : GB_IDXOP (Matrix, __VA_ARGS__) \
) \
) \
(C, Mask, accum, op, __VA_ARGS__)
#endif
//==============================================================================
// GrB_select: matrix and vector selection using an IndexUnaryOp
//==============================================================================
//-------------------------------------------
// vector select using an IndexUnaryOp
//-------------------------------------------
GB_PUBLIC
GrB_Info GrB_Vector_select_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_select_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_select_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix select using an IndexUnaryOp
//-------------------------------------------
GB_PUBLIC
GrB_Info GrB_Matrix_select_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GrB_select is a generic method that applies an IndexUnaryOp to
// a matrix or vector, using any type of the scalar y.
// GrB_Vector_select_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_select_TYPE (C,M,acc,idxop,A,y,d)
#if GxB_STDC_VERSION >= 201112L
// GrB_select: generic select.  Dispatches on C (GrB_Vector vs GrB_Matrix),
// then on the type of the scalar y: a built-in C type (via GB_CASES) picks
// the typed GrB_*_select_* method; anything else defaults to the _Scalar
// variant taking a GrB_Scalar.
#define GrB_select(C,Mask,accum,op,x,y,d) \
_Generic \
( \
(C), \
GrB_Vector : \
_Generic \
( \
(y), \
GB_CASES (, GrB, Vector_select), \
default: GrB_Vector_select_Scalar \
), \
GrB_Matrix : \
_Generic \
( \
(y), \
GB_CASES (, GrB, Matrix_select), \
default: GrB_Matrix_select_Scalar \
) \
) \
(C, Mask, accum, op, x, y, d)
#endif
//==============================================================================
// GxB_select: matrix and vector selection (historical)
//==============================================================================
// GrB_select with the GrB_IndexUnaryOp operators should be used instead.
GB_PUBLIC
GrB_Info GxB_Vector_select // w<mask> = accum (w, op(u,k))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select // C<Mask> = accum (C, op(A,k)) or op(A',k)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
#if GxB_STDC_VERSION >= 201112L
// GxB_select: generic select with a GxB_SelectOp (historical; see the
// GrB_select / GrB_IndexUnaryOp methods above).  Dispatches only on C,
// since the Thunk argument is always a GrB_Scalar.
#define GxB_select(C,Mask,accum,op,A,Thunk,desc) \
_Generic \
( \
(C), \
GrB_Vector : GxB_Vector_select , \
GrB_Matrix : GxB_Matrix_select \
) \
(C, Mask, accum, op, A, Thunk, desc)
#endif
//==============================================================================
// GrB_reduce: matrix and vector reduction
//==============================================================================
// Reduce the entries in a matrix to a vector, a column vector t such that
// t(i) = sum (A (i,:)), and where "sum" is a commutative and associative
// monoid with an identity value. A can be transposed, which reduces down the
// columns instead of the rows.
// For GrB_Matrix_reduce_BinaryOp, the GrB_BinaryOp op must correspond to a
// known built-in monoid:
//
// operator data-types (all built-in)
// ---------------------- ---------------------------
// MIN, MAX INT*, UINT*, FP*
// TIMES, PLUS INT*, UINT*, FP*, FC*
// ANY INT*, UINT*, FP*, FC*, BOOL
// LOR, LAND, LXOR, EQ BOOL
// BOR, BAND, BXOR, BXNOR UINT*
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// reduce a vector to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a vector to a scalar, c = accum (c, reduce_to_scalar(u))
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BOOL // c = accum (c, reduce_to_scalar (u))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT8 // c = accum (c, reduce_to_scalar (u))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT8 // c = accum (c, reduce_to_scalar (u))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT16 // c = accum (c, reduce_to_scalar (u))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT16 // c = accum (c, reduce_to_scalar (u))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT32 // c = accum (c, reduce_to_scalar (u))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT32 // c = accum (c, reduce_to_scalar (u))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT64 // c = accum (c, reduce_to_scalar (u))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT64 // c = accum (c, reduce_to_scalar (u))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP32 // c = accum (c, reduce_to_scalar (u))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP64 // c = accum (c, reduce_to_scalar (u))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC32 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC64 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UDT // c = accum (c, reduce_to_scalar (u))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(u))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BinaryOp_Scalar
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// reduce a matrix to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a matrix to a scalar, c = accum (c, reduce_to_scalar(A))
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BOOL // c = accum (c, reduce_to_scalar (A))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT8 // c = accum (c, reduce_to_scalar (A))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT8 // c = accum (c, reduce_to_scalar (A))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT16 // c = accum (c, reduce_to_scalar (A))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT16 // c = accum (c, reduce_to_scalar (A))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT32 // c = accum (c, reduce_to_scalar (A))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT32 // c = accum (c, reduce_to_scalar (A))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT64 // c = accum (c, reduce_to_scalar (A))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT64 // c = accum (c, reduce_to_scalar (A))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP32 // c = accum (c, reduce_to_scalar (A))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP64 // c = accum (c, reduce_to_scalar (A))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC32 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC64 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UDT // c = accum (c, reduce_to_scalar (A))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(A))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp_Scalar
(
GrB_Scalar S, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GrB_reduce: generic matrix/vector reduction to a vector or scalar
//------------------------------------------------------------------------------
// GrB_reduce is a generic function that provides access to all GrB_*reduce*
// functions:
// reduce matrix to vector:
// GrB_Matrix_reduce_Monoid (w,mask,acc,mo,A,d) // w<mask> = acc (w,reduce(A))
// GrB_Matrix_reduce_BinaryOp (w,mask,acc,op,A,d) // w<mask> = acc (w,reduce(A))
// reduce matrix or vector to scalar:
// GrB_Vector_reduce_[SCALAR] (c,acc,monoid,u,d) // c = acc (c,reduce(u))
// GrB_Matrix_reduce_[SCALAR] (c,acc,monoid,A,d) // c = acc (c,reduce(A))
// GrB_Vector_reduce_Monoid_Scalar (s,acc,monoid,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_Monoid_Scalar (s,acc,monoid,A,d) // s = acc (s,reduce(A))
// GrB_Vector_reduce_BinaryOp_Scalar (s,acc,op,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_BinaryOp_Scalar (s,acc,op,A,d) // s = acc (s,reduce(A))
#if GxB_STDC_VERSION >= 201112L
// GB_REDUCE_TO_SCALAR: dispatch helper for reducing a matrix or vector to a
// scalar.  Examines the type of the output c: a pointer to a built-in C type
// (via GB_CASES with the * prefix) picks the typed GrB_*_reduce_* method;
// otherwise c is a GrB_Scalar, and the type of op decides between the
// _reduce_BinaryOp_Scalar and _reduce_Monoid_Scalar variants.
#define GB_REDUCE_TO_SCALAR(kind,c,op) \
_Generic \
( \
(c), \
GB_CASES (*, GrB, GB_CONCAT ( kind, _reduce,, )), \
default: \
_Generic \
( \
(op), \
const GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
default: GB_CONCAT (GrB,_,kind,_reduce_Monoid_Scalar) \
) \
)
// GrB_reduce: generic reduction.  Dispatches on arg4: a GrB_Vector or
// GrB_Matrix there means reduce-to-scalar (delegating to GB_REDUCE_TO_SCALAR
// with arg1 = output scalar and arg3 = monoid/op); a GrB_Monoid or
// GrB_BinaryOp there means matrix-to-vector reduction.
#define GrB_reduce(arg1,arg2,arg3,arg4,...) \
_Generic \
( \
(arg4), \
const GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
const GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
const GrB_Monoid : GrB_Matrix_reduce_Monoid , \
GrB_Monoid : GrB_Matrix_reduce_Monoid , \
const GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp \
) \
(arg1, arg2, arg3, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GrB_transpose: matrix transpose
//==============================================================================
GB_PUBLIC
GrB_Info GrB_transpose // C<Mask> = accum (C, A')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
//==============================================================================
// GrB_kronecker: Kronecker product
//==============================================================================
// GxB_kron is historical; use GrB_kronecker instead
GB_PUBLIC
GrB_Info GxB_kron // C<Mask> = accum(C,kron(A,B)) (historical)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_BinaryOp // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Monoid // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Semiring // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
#if GxB_STDC_VERSION >= 201112L
// GrB_kronecker: generic Kronecker product.  Dispatches on the type of the
// multiplicative operator op: GrB_Semiring, GrB_Monoid, or GrB_BinaryOp
// (const-qualified cases are listed as well, since _Generic matches the
// qualified type exactly).
#define GrB_kronecker(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
const GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
const GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_Monoid: built-in monoids
//==============================================================================
// Built-in monoid objects.  Each comment lists the monoid's identity value
// and, where one exists, its terminal (absorbing) value: once a reduction
// reaches the terminal value it can stop early (short-circuit).
GB_PUBLIC GrB_Monoid
    //--------------------------------------------------------------------------
    // 10 MIN monoids: (not for complex types)
    //--------------------------------------------------------------------------
    // GxB_MIN monoids, historical, use GrB_MIN_MONOID_* instead:
    GxB_MIN_INT8_MONOID,          // identity: INT8_MAX     terminal: INT8_MIN
    GxB_MIN_INT16_MONOID,         // identity: INT16_MAX    terminal: INT16_MIN
    GxB_MIN_INT32_MONOID,         // identity: INT32_MAX    terminal: INT32_MIN
    GxB_MIN_INT64_MONOID,         // identity: INT64_MAX    terminal: INT64_MIN
    GxB_MIN_UINT8_MONOID,         // identity: UINT8_MAX    terminal: 0
    GxB_MIN_UINT16_MONOID,        // identity: UINT16_MAX   terminal: 0
    GxB_MIN_UINT32_MONOID,        // identity: UINT32_MAX   terminal: 0
    GxB_MIN_UINT64_MONOID,        // identity: UINT64_MAX   terminal: 0
    GxB_MIN_FP32_MONOID,          // identity: INFINITY     terminal: -INFINITY
    GxB_MIN_FP64_MONOID,          // identity: INFINITY     terminal: -INFINITY
    // preferred names from the v1.3 spec:
    GrB_MIN_MONOID_INT8,          // identity: INT8_MAX     terminal: INT8_MIN
    GrB_MIN_MONOID_INT16,         // identity: INT16_MAX    terminal: INT16_MIN
    GrB_MIN_MONOID_INT32,         // identity: INT32_MAX    terminal: INT32_MIN
    GrB_MIN_MONOID_INT64,         // identity: INT64_MAX    terminal: INT64_MIN
    GrB_MIN_MONOID_UINT8,         // identity: UINT8_MAX    terminal: 0
    GrB_MIN_MONOID_UINT16,        // identity: UINT16_MAX   terminal: 0
    GrB_MIN_MONOID_UINT32,        // identity: UINT32_MAX   terminal: 0
    GrB_MIN_MONOID_UINT64,        // identity: UINT64_MAX   terminal: 0
    GrB_MIN_MONOID_FP32,          // identity: INFINITY     terminal: -INFINITY
    GrB_MIN_MONOID_FP64,          // identity: INFINITY     terminal: -INFINITY
    //--------------------------------------------------------------------------
    // 10 MAX monoids:
    //--------------------------------------------------------------------------
    // GxB_MAX monoids, historical, use GrB_MAX_MONOID_* instead:
    GxB_MAX_INT8_MONOID,          // identity: INT8_MIN     terminal: INT8_MAX
    GxB_MAX_INT16_MONOID,         // identity: INT16_MIN    terminal: INT16_MAX
    GxB_MAX_INT32_MONOID,         // identity: INT32_MIN    terminal: INT32_MAX
    GxB_MAX_INT64_MONOID,         // identity: INT64_MIN    terminal: INT64_MAX
    GxB_MAX_UINT8_MONOID,         // identity: 0            terminal: UINT8_MAX
    GxB_MAX_UINT16_MONOID,        // identity: 0            terminal: UINT16_MAX
    GxB_MAX_UINT32_MONOID,        // identity: 0            terminal: UINT32_MAX
    GxB_MAX_UINT64_MONOID,        // identity: 0            terminal: UINT64_MAX
    GxB_MAX_FP32_MONOID,          // identity: -INFINITY    terminal: INFINITY
    GxB_MAX_FP64_MONOID,          // identity: -INFINITY    terminal: INFINITY
    // preferred names from the v1.3 spec:
    GrB_MAX_MONOID_INT8,          // identity: INT8_MIN     terminal: INT8_MAX
    GrB_MAX_MONOID_INT16,         // identity: INT16_MIN    terminal: INT16_MAX
    GrB_MAX_MONOID_INT32,         // identity: INT32_MIN    terminal: INT32_MAX
    GrB_MAX_MONOID_INT64,         // identity: INT64_MIN    terminal: INT64_MAX
    GrB_MAX_MONOID_UINT8,         // identity: 0            terminal: UINT8_MAX
    GrB_MAX_MONOID_UINT16,        // identity: 0            terminal: UINT16_MAX
    GrB_MAX_MONOID_UINT32,        // identity: 0            terminal: UINT32_MAX
    GrB_MAX_MONOID_UINT64,        // identity: 0            terminal: UINT64_MAX
    GrB_MAX_MONOID_FP32,          // identity: -INFINITY    terminal: INFINITY
    GrB_MAX_MONOID_FP64,          // identity: -INFINITY    terminal: INFINITY
    //--------------------------------------------------------------------------
    // 12 PLUS monoids:
    //--------------------------------------------------------------------------
    // GxB_PLUS monoids, historical, use GrB_PLUS_MONOID_* instead:
    GxB_PLUS_INT8_MONOID,         // identity: 0
    GxB_PLUS_INT16_MONOID,        // identity: 0
    GxB_PLUS_INT32_MONOID,        // identity: 0
    GxB_PLUS_INT64_MONOID,        // identity: 0
    GxB_PLUS_UINT8_MONOID,        // identity: 0
    GxB_PLUS_UINT16_MONOID,       // identity: 0
    GxB_PLUS_UINT32_MONOID,       // identity: 0
    GxB_PLUS_UINT64_MONOID,       // identity: 0
    GxB_PLUS_FP32_MONOID,         // identity: 0
    GxB_PLUS_FP64_MONOID,         // identity: 0
    // preferred names from the v1.3 spec:
    GrB_PLUS_MONOID_INT8,         // identity: 0
    GrB_PLUS_MONOID_INT16,        // identity: 0
    GrB_PLUS_MONOID_INT32,        // identity: 0
    GrB_PLUS_MONOID_INT64,        // identity: 0
    GrB_PLUS_MONOID_UINT8,        // identity: 0
    GrB_PLUS_MONOID_UINT16,       // identity: 0
    GrB_PLUS_MONOID_UINT32,       // identity: 0
    GrB_PLUS_MONOID_UINT64,       // identity: 0
    GrB_PLUS_MONOID_FP32,         // identity: 0
    GrB_PLUS_MONOID_FP64,         // identity: 0
    // complex monoids:
    GxB_PLUS_FC32_MONOID,         // identity: 0
    GxB_PLUS_FC64_MONOID,         // identity: 0
    //--------------------------------------------------------------------------
    // 12 TIMES monoids: identity value is 1, int* and uint* are terminal
    //--------------------------------------------------------------------------
    // GxB_TIMES monoids, historical, use GrB_TIMES_MONOID_* instead:
    GxB_TIMES_INT8_MONOID,        // identity: 1            terminal: 0
    GxB_TIMES_INT16_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_INT32_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_INT64_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_UINT8_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_UINT16_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_UINT32_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_UINT64_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_FP32_MONOID,        // identity: 1
    GxB_TIMES_FP64_MONOID,        // identity: 1
    // preferred names from the v1.3 spec:
    GrB_TIMES_MONOID_INT8,        // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT16,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT32,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT64,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT8,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT16,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT32,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT64,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_FP32,        // identity: 1
    GrB_TIMES_MONOID_FP64,        // identity: 1
    // complex monoids:
    GxB_TIMES_FC32_MONOID,        // identity: 1
    GxB_TIMES_FC64_MONOID,        // identity: 1
    //--------------------------------------------------------------------------
    // 13 ANY monoids:
    //--------------------------------------------------------------------------
    GxB_ANY_BOOL_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_INT8_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_INT16_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_INT32_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_INT64_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_UINT8_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_UINT16_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_UINT32_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_UINT64_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_FP32_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FP64_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FC32_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FC64_MONOID,          // identity: any value    terminal: any value
    //--------------------------------------------------------------------------
    // 4 Boolean monoids: (see also the GxB_ANY_BOOL_MONOID above)
    //--------------------------------------------------------------------------
    // GxB_* boolean monoids, historical, use GrB_* instead:
    GxB_LOR_BOOL_MONOID,          // identity: false        terminal: true
    GxB_LAND_BOOL_MONOID,         // identity: true         terminal: false
    GxB_LXOR_BOOL_MONOID,         // identity: false
    GxB_LXNOR_BOOL_MONOID,        // identity: true
    GxB_EQ_BOOL_MONOID,           // (alternative name for GrB_LXNOR_MONOID_BOOL)
    // preferred names from the v1.3 spec:
    GrB_LOR_MONOID_BOOL,          // identity: false        terminal: true
    GrB_LAND_MONOID_BOOL,         // identity: true         terminal: false
    GrB_LXOR_MONOID_BOOL,         // identity: false
    GrB_LXNOR_MONOID_BOOL,        // identity: true
    //--------------------------------------------------------------------------
    // 16 Bitwise-or monoids:
    //--------------------------------------------------------------------------
    // BOR monoids (bitwise or):
    GxB_BOR_UINT8_MONOID,         // identity: 0            terminal: 0xFF
    GxB_BOR_UINT16_MONOID,        // identity: 0            terminal: 0xFFFF
    GxB_BOR_UINT32_MONOID,        // identity: 0            terminal: 0xFFFFFFFF
    GxB_BOR_UINT64_MONOID,        // identity: 0            terminal: 0xFFFFFFFFFFFFFFFF
    // BAND monoids (bitwise and):
    GxB_BAND_UINT8_MONOID,        // identity: 0xFF         terminal: 0
    GxB_BAND_UINT16_MONOID,       // identity: 0xFFFF       terminal: 0
    GxB_BAND_UINT32_MONOID,       // identity: 0xFFFFFFFF   terminal: 0
    GxB_BAND_UINT64_MONOID,       // identity: 0xFFFFFFFFFFFFFFFF terminal: 0
    // BXOR monoids (bitwise xor):
    GxB_BXOR_UINT8_MONOID,        // identity: 0
    GxB_BXOR_UINT16_MONOID,       // identity: 0
    GxB_BXOR_UINT32_MONOID,       // identity: 0
    GxB_BXOR_UINT64_MONOID,       // identity: 0
    // BXNOR monoids (bitwise xnor):
    GxB_BXNOR_UINT8_MONOID,       // identity: 0xFF
    GxB_BXNOR_UINT16_MONOID,      // identity: 0xFFFF
    GxB_BXNOR_UINT32_MONOID,      // identity: 0xFFFFFFFF
    GxB_BXNOR_UINT64_MONOID ;     // identity: 0xFFFFFFFFFFFFFFFF
//==============================================================================
// GrB_Semiring: built-in semirings
//==============================================================================
// Using built-in types and operators, SuiteSparse:GraphBLAS provides
// 1553 pre-defined, built-in semirings:
// 1000 semirings with a multiply operator TxT -> T where T is non-Boolean,
// from the complete cross product of:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 20 multiply operators:
// FIRST, SECOND, PAIR (=ONEB), MIN, MAX, PLUS, MINUS, TIMES, DIV,
// RDIV, RMINUS
// ISEQ, ISNE, ISGT, ISLT, ISGE, ISLE,
// LOR, LAND, LXOR
// 10 non-Boolean real types, T
//
// Note that min_pair, max_pair, times_pair are all identical to any_pair.
// These 30 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 300 semirings with a comparator TxT -> bool, where T is
// non-Boolean, from the complete cross product of:
// 5 Boolean monoids: LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 6 multiply operators: EQ, NE, GT, LT, GE, LE
// 10 non-Boolean real types, T
// 55 semirings with purely Boolean types, bool x bool -> bool, from the
// complete cross product of:
// 5 Boolean monoids LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 11 multiply operators:
// FIRST, SECOND, LOR, LAND, LXOR, EQ (=LXNOR), GT, LT, GE, LE,
// PAIR (=ONEB)
//
// Note that lor_pair, land_pair, and eq_pair are all identical to
// any_pair. These 3 semirings are named below, but are internally
// remapped to any_pair_bool semiring.
// 54 complex semirings: TxT -> T where T is float complex or double complex:
// 3 complex monoids: PLUS, TIMES, ANY
// 9 complex multiply operators:
// FIRST, SECOND, PAIR (=ONEB), PLUS, MINUS, TIMES, DIV, RDIV, RMINUS
// 2 complex types
//
// Note that times_pair is identical to any_pair.
// These 2 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 64 bitwise semirings: TxT -> T where T is an unsigned integer:
// 4 bitwise monoids: BOR, BAND, BXOR, BXNOR
// 4 bitwise multiply operators: BOR, BAND, BXOR, BXNOR
// 4 unsigned integer types: UINT8, UINT16, UINT32, UINT64
// 80 positional semirings: XxX -> T where T is int64 or int32, and the type of
// X is ignored:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 8 multiply operators:
// FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1,
// SECONDI, SECONDI1, SECONDJ, SECONDJ1
// 2 types: int32, int64
// The ANY operator is also valid to use as a multiplicative operator in a
// semiring, but serves no purpose in that case. The ANY operator is meant as
// a fast additive operator for a monoid, that terminates, or short-circuits,
// as soon as any value is found. A valid user semiring can be constructed
// with ANY as the multiply operator, but they are not predefined below.
// Likewise, additional built-in operators can be used as multiplicative
// operators for floating-point semirings (POW, ATAN2, HYPOT, ...) and many
// more semirings can be constructed from bitwise monoids and many integer
// binary (non-bitwise) multiplicative operators, but these are not
// pre-defined.
// In the names below, each semiring has a name of the form GxB_add_mult_T
// where add is the additive monoid, mult is the multiply operator, and T is
// the type. The type T is always the type of x and y for the z=mult(x,y)
// operator. The monoid's three types and the ztype of the mult operator are
// always the same. This is the type T for the first set, and Boolean for
// the second and third sets of semirings.
// 1553 = 1000 + 300 + 55 + 54 + 64 + 80 semirings are named below, but 35 = 30
// + 3 + 2 are identical to the corresponding any_pair semirings of the same
// type.  For positional semirings, the multiply ops FIRSTJ and SECONDI are
// identical, as are FIRSTJ1 and SECONDI1. These semirings still appear as
// predefined, for convenience.
GB_PUBLIC GrB_Semiring
//------------------------------------------------------------------------------
// 1000 non-Boolean semirings where all types are the same, given by suffix _T
//------------------------------------------------------------------------------
// semirings with multiply op: z = FIRST (x,y), all types x,y,z the same:
GxB_MIN_FIRST_INT8 , GxB_MAX_FIRST_INT8 , GxB_PLUS_FIRST_INT8 , GxB_TIMES_FIRST_INT8 , GxB_ANY_FIRST_INT8 ,
GxB_MIN_FIRST_INT16 , GxB_MAX_FIRST_INT16 , GxB_PLUS_FIRST_INT16 , GxB_TIMES_FIRST_INT16 , GxB_ANY_FIRST_INT16 ,
GxB_MIN_FIRST_INT32 , GxB_MAX_FIRST_INT32 , GxB_PLUS_FIRST_INT32 , GxB_TIMES_FIRST_INT32 , GxB_ANY_FIRST_INT32 ,
GxB_MIN_FIRST_INT64 , GxB_MAX_FIRST_INT64 , GxB_PLUS_FIRST_INT64 , GxB_TIMES_FIRST_INT64 , GxB_ANY_FIRST_INT64 ,
GxB_MIN_FIRST_UINT8 , GxB_MAX_FIRST_UINT8 , GxB_PLUS_FIRST_UINT8 , GxB_TIMES_FIRST_UINT8 , GxB_ANY_FIRST_UINT8 ,
GxB_MIN_FIRST_UINT16 , GxB_MAX_FIRST_UINT16 , GxB_PLUS_FIRST_UINT16 , GxB_TIMES_FIRST_UINT16 , GxB_ANY_FIRST_UINT16 ,
GxB_MIN_FIRST_UINT32 , GxB_MAX_FIRST_UINT32 , GxB_PLUS_FIRST_UINT32 , GxB_TIMES_FIRST_UINT32 , GxB_ANY_FIRST_UINT32 ,
GxB_MIN_FIRST_UINT64 , GxB_MAX_FIRST_UINT64 , GxB_PLUS_FIRST_UINT64 , GxB_TIMES_FIRST_UINT64 , GxB_ANY_FIRST_UINT64 ,
GxB_MIN_FIRST_FP32 , GxB_MAX_FIRST_FP32 , GxB_PLUS_FIRST_FP32 , GxB_TIMES_FIRST_FP32 , GxB_ANY_FIRST_FP32 ,
GxB_MIN_FIRST_FP64 , GxB_MAX_FIRST_FP64 , GxB_PLUS_FIRST_FP64 , GxB_TIMES_FIRST_FP64 , GxB_ANY_FIRST_FP64 ,
// semirings with multiply op: z = SECOND (x,y), all types x,y,z the same:
GxB_MIN_SECOND_INT8 , GxB_MAX_SECOND_INT8 , GxB_PLUS_SECOND_INT8 , GxB_TIMES_SECOND_INT8 , GxB_ANY_SECOND_INT8 ,
GxB_MIN_SECOND_INT16 , GxB_MAX_SECOND_INT16 , GxB_PLUS_SECOND_INT16 , GxB_TIMES_SECOND_INT16 , GxB_ANY_SECOND_INT16 ,
GxB_MIN_SECOND_INT32 , GxB_MAX_SECOND_INT32 , GxB_PLUS_SECOND_INT32 , GxB_TIMES_SECOND_INT32 , GxB_ANY_SECOND_INT32 ,
GxB_MIN_SECOND_INT64 , GxB_MAX_SECOND_INT64 , GxB_PLUS_SECOND_INT64 , GxB_TIMES_SECOND_INT64 , GxB_ANY_SECOND_INT64 ,
GxB_MIN_SECOND_UINT8 , GxB_MAX_SECOND_UINT8 , GxB_PLUS_SECOND_UINT8 , GxB_TIMES_SECOND_UINT8 , GxB_ANY_SECOND_UINT8 ,
GxB_MIN_SECOND_UINT16 , GxB_MAX_SECOND_UINT16 , GxB_PLUS_SECOND_UINT16 , GxB_TIMES_SECOND_UINT16, GxB_ANY_SECOND_UINT16 ,
GxB_MIN_SECOND_UINT32 , GxB_MAX_SECOND_UINT32 , GxB_PLUS_SECOND_UINT32 , GxB_TIMES_SECOND_UINT32, GxB_ANY_SECOND_UINT32 ,
GxB_MIN_SECOND_UINT64 , GxB_MAX_SECOND_UINT64 , GxB_PLUS_SECOND_UINT64 , GxB_TIMES_SECOND_UINT64, GxB_ANY_SECOND_UINT64 ,
GxB_MIN_SECOND_FP32 , GxB_MAX_SECOND_FP32 , GxB_PLUS_SECOND_FP32 , GxB_TIMES_SECOND_FP32 , GxB_ANY_SECOND_FP32 ,
GxB_MIN_SECOND_FP64 , GxB_MAX_SECOND_FP64 , GxB_PLUS_SECOND_FP64 , GxB_TIMES_SECOND_FP64 , GxB_ANY_SECOND_FP64 ,
// semirings with multiply op: z = PAIR (x,y), all types x,y,z the same:
// (note that min_pair, max_pair, times_pair are all identical to any_pair, and are marked below)
GxB_MIN_PAIR_INT8 /**/, GxB_MAX_PAIR_INT8 /**/, GxB_PLUS_PAIR_INT8 , GxB_TIMES_PAIR_INT8 /**/, GxB_ANY_PAIR_INT8 ,
GxB_MIN_PAIR_INT16 /**/, GxB_MAX_PAIR_INT16 /**/, GxB_PLUS_PAIR_INT16 , GxB_TIMES_PAIR_INT16 /**/, GxB_ANY_PAIR_INT16 ,
GxB_MIN_PAIR_INT32 /**/, GxB_MAX_PAIR_INT32 /**/, GxB_PLUS_PAIR_INT32 , GxB_TIMES_PAIR_INT32 /**/, GxB_ANY_PAIR_INT32 ,
GxB_MIN_PAIR_INT64 /**/, GxB_MAX_PAIR_INT64 /**/, GxB_PLUS_PAIR_INT64 , GxB_TIMES_PAIR_INT64 /**/, GxB_ANY_PAIR_INT64 ,
GxB_MIN_PAIR_UINT8 /**/, GxB_MAX_PAIR_UINT8 /**/, GxB_PLUS_PAIR_UINT8 , GxB_TIMES_PAIR_UINT8 /**/, GxB_ANY_PAIR_UINT8 ,
GxB_MIN_PAIR_UINT16/**/, GxB_MAX_PAIR_UINT16/**/, GxB_PLUS_PAIR_UINT16 , GxB_TIMES_PAIR_UINT16/**/, GxB_ANY_PAIR_UINT16 ,
GxB_MIN_PAIR_UINT32/**/, GxB_MAX_PAIR_UINT32/**/, GxB_PLUS_PAIR_UINT32 , GxB_TIMES_PAIR_UINT32/**/, GxB_ANY_PAIR_UINT32 ,
GxB_MIN_PAIR_UINT64/**/, GxB_MAX_PAIR_UINT64/**/, GxB_PLUS_PAIR_UINT64 , GxB_TIMES_PAIR_UINT64/**/, GxB_ANY_PAIR_UINT64 ,
GxB_MIN_PAIR_FP32 /**/, GxB_MAX_PAIR_FP32 /**/, GxB_PLUS_PAIR_FP32 , GxB_TIMES_PAIR_FP32 /**/, GxB_ANY_PAIR_FP32 ,
GxB_MIN_PAIR_FP64 /**/, GxB_MAX_PAIR_FP64 /**/, GxB_PLUS_PAIR_FP64 , GxB_TIMES_PAIR_FP64 /**/, GxB_ANY_PAIR_FP64 ,
// semirings with multiply op: z = MIN (x,y), all types x,y,z the same:
GxB_MIN_MIN_INT8 , GxB_MAX_MIN_INT8 , GxB_PLUS_MIN_INT8 , GxB_TIMES_MIN_INT8 , GxB_ANY_MIN_INT8 ,
GxB_MIN_MIN_INT16 , GxB_MAX_MIN_INT16 , GxB_PLUS_MIN_INT16 , GxB_TIMES_MIN_INT16 , GxB_ANY_MIN_INT16 ,
GxB_MIN_MIN_INT32 , GxB_MAX_MIN_INT32 , GxB_PLUS_MIN_INT32 , GxB_TIMES_MIN_INT32 , GxB_ANY_MIN_INT32 ,
GxB_MIN_MIN_INT64 , GxB_MAX_MIN_INT64 , GxB_PLUS_MIN_INT64 , GxB_TIMES_MIN_INT64 , GxB_ANY_MIN_INT64 ,
GxB_MIN_MIN_UINT8 , GxB_MAX_MIN_UINT8 , GxB_PLUS_MIN_UINT8 , GxB_TIMES_MIN_UINT8 , GxB_ANY_MIN_UINT8 ,
GxB_MIN_MIN_UINT16 , GxB_MAX_MIN_UINT16 , GxB_PLUS_MIN_UINT16 , GxB_TIMES_MIN_UINT16 , GxB_ANY_MIN_UINT16 ,
GxB_MIN_MIN_UINT32 , GxB_MAX_MIN_UINT32 , GxB_PLUS_MIN_UINT32 , GxB_TIMES_MIN_UINT32 , GxB_ANY_MIN_UINT32 ,
GxB_MIN_MIN_UINT64 , GxB_MAX_MIN_UINT64 , GxB_PLUS_MIN_UINT64 , GxB_TIMES_MIN_UINT64 , GxB_ANY_MIN_UINT64 ,
GxB_MIN_MIN_FP32 , GxB_MAX_MIN_FP32 , GxB_PLUS_MIN_FP32 , GxB_TIMES_MIN_FP32 , GxB_ANY_MIN_FP32 ,
GxB_MIN_MIN_FP64 , GxB_MAX_MIN_FP64 , GxB_PLUS_MIN_FP64 , GxB_TIMES_MIN_FP64 , GxB_ANY_MIN_FP64 ,
// semirings with multiply op: z = MAX (x,y), all types x,y,z the same:
GxB_MIN_MAX_INT8 , GxB_MAX_MAX_INT8 , GxB_PLUS_MAX_INT8 , GxB_TIMES_MAX_INT8 , GxB_ANY_MAX_INT8 ,
GxB_MIN_MAX_INT16 , GxB_MAX_MAX_INT16 , GxB_PLUS_MAX_INT16 , GxB_TIMES_MAX_INT16 , GxB_ANY_MAX_INT16 ,
GxB_MIN_MAX_INT32 , GxB_MAX_MAX_INT32 , GxB_PLUS_MAX_INT32 , GxB_TIMES_MAX_INT32 , GxB_ANY_MAX_INT32 ,
GxB_MIN_MAX_INT64 , GxB_MAX_MAX_INT64 , GxB_PLUS_MAX_INT64 , GxB_TIMES_MAX_INT64 , GxB_ANY_MAX_INT64 ,
GxB_MIN_MAX_UINT8 , GxB_MAX_MAX_UINT8 , GxB_PLUS_MAX_UINT8 , GxB_TIMES_MAX_UINT8 , GxB_ANY_MAX_UINT8 ,
GxB_MIN_MAX_UINT16 , GxB_MAX_MAX_UINT16 , GxB_PLUS_MAX_UINT16 , GxB_TIMES_MAX_UINT16 , GxB_ANY_MAX_UINT16 ,
GxB_MIN_MAX_UINT32 , GxB_MAX_MAX_UINT32 , GxB_PLUS_MAX_UINT32 , GxB_TIMES_MAX_UINT32 , GxB_ANY_MAX_UINT32 ,
GxB_MIN_MAX_UINT64 , GxB_MAX_MAX_UINT64 , GxB_PLUS_MAX_UINT64 , GxB_TIMES_MAX_UINT64 , GxB_ANY_MAX_UINT64 ,
GxB_MIN_MAX_FP32 , GxB_MAX_MAX_FP32 , GxB_PLUS_MAX_FP32 , GxB_TIMES_MAX_FP32 , GxB_ANY_MAX_FP32 ,
GxB_MIN_MAX_FP64 , GxB_MAX_MAX_FP64 , GxB_PLUS_MAX_FP64 , GxB_TIMES_MAX_FP64 , GxB_ANY_MAX_FP64 ,
// semirings with multiply op: z = PLUS (x,y), all types x,y,z the same:
GxB_MIN_PLUS_INT8 , GxB_MAX_PLUS_INT8 , GxB_PLUS_PLUS_INT8 , GxB_TIMES_PLUS_INT8 , GxB_ANY_PLUS_INT8 ,
GxB_MIN_PLUS_INT16 , GxB_MAX_PLUS_INT16 , GxB_PLUS_PLUS_INT16 , GxB_TIMES_PLUS_INT16 , GxB_ANY_PLUS_INT16 ,
GxB_MIN_PLUS_INT32 , GxB_MAX_PLUS_INT32 , GxB_PLUS_PLUS_INT32 , GxB_TIMES_PLUS_INT32 , GxB_ANY_PLUS_INT32 ,
GxB_MIN_PLUS_INT64 , GxB_MAX_PLUS_INT64 , GxB_PLUS_PLUS_INT64 , GxB_TIMES_PLUS_INT64 , GxB_ANY_PLUS_INT64 ,
GxB_MIN_PLUS_UINT8 , GxB_MAX_PLUS_UINT8 , GxB_PLUS_PLUS_UINT8 , GxB_TIMES_PLUS_UINT8 , GxB_ANY_PLUS_UINT8 ,
GxB_MIN_PLUS_UINT16 , GxB_MAX_PLUS_UINT16 , GxB_PLUS_PLUS_UINT16 , GxB_TIMES_PLUS_UINT16 , GxB_ANY_PLUS_UINT16 ,
GxB_MIN_PLUS_UINT32 , GxB_MAX_PLUS_UINT32 , GxB_PLUS_PLUS_UINT32 , GxB_TIMES_PLUS_UINT32 , GxB_ANY_PLUS_UINT32 ,
GxB_MIN_PLUS_UINT64 , GxB_MAX_PLUS_UINT64 , GxB_PLUS_PLUS_UINT64 , GxB_TIMES_PLUS_UINT64 , GxB_ANY_PLUS_UINT64 ,
GxB_MIN_PLUS_FP32 , GxB_MAX_PLUS_FP32 , GxB_PLUS_PLUS_FP32 , GxB_TIMES_PLUS_FP32 , GxB_ANY_PLUS_FP32 ,
GxB_MIN_PLUS_FP64 , GxB_MAX_PLUS_FP64 , GxB_PLUS_PLUS_FP64 , GxB_TIMES_PLUS_FP64 , GxB_ANY_PLUS_FP64 ,
// semirings with multiply op: z = MINUS (x,y), all types x,y,z the same:
GxB_MIN_MINUS_INT8 , GxB_MAX_MINUS_INT8 , GxB_PLUS_MINUS_INT8 , GxB_TIMES_MINUS_INT8 , GxB_ANY_MINUS_INT8 ,
GxB_MIN_MINUS_INT16 , GxB_MAX_MINUS_INT16 , GxB_PLUS_MINUS_INT16 , GxB_TIMES_MINUS_INT16 , GxB_ANY_MINUS_INT16 ,
GxB_MIN_MINUS_INT32 , GxB_MAX_MINUS_INT32 , GxB_PLUS_MINUS_INT32 , GxB_TIMES_MINUS_INT32 , GxB_ANY_MINUS_INT32 ,
GxB_MIN_MINUS_INT64 , GxB_MAX_MINUS_INT64 , GxB_PLUS_MINUS_INT64 , GxB_TIMES_MINUS_INT64 , GxB_ANY_MINUS_INT64 ,
GxB_MIN_MINUS_UINT8 , GxB_MAX_MINUS_UINT8 , GxB_PLUS_MINUS_UINT8 , GxB_TIMES_MINUS_UINT8 , GxB_ANY_MINUS_UINT8 ,
GxB_MIN_MINUS_UINT16 , GxB_MAX_MINUS_UINT16 , GxB_PLUS_MINUS_UINT16 , GxB_TIMES_MINUS_UINT16 , GxB_ANY_MINUS_UINT16 ,
GxB_MIN_MINUS_UINT32 , GxB_MAX_MINUS_UINT32 , GxB_PLUS_MINUS_UINT32 , GxB_TIMES_MINUS_UINT32 , GxB_ANY_MINUS_UINT32 ,
GxB_MIN_MINUS_UINT64 , GxB_MAX_MINUS_UINT64 , GxB_PLUS_MINUS_UINT64 , GxB_TIMES_MINUS_UINT64 , GxB_ANY_MINUS_UINT64 ,
GxB_MIN_MINUS_FP32 , GxB_MAX_MINUS_FP32 , GxB_PLUS_MINUS_FP32 , GxB_TIMES_MINUS_FP32 , GxB_ANY_MINUS_FP32 ,
GxB_MIN_MINUS_FP64 , GxB_MAX_MINUS_FP64 , GxB_PLUS_MINUS_FP64 , GxB_TIMES_MINUS_FP64 , GxB_ANY_MINUS_FP64 ,
// semirings with multiply op: z = TIMES (x,y), all types x,y,z the same:
GxB_MIN_TIMES_INT8 , GxB_MAX_TIMES_INT8 , GxB_PLUS_TIMES_INT8 , GxB_TIMES_TIMES_INT8 , GxB_ANY_TIMES_INT8 ,
GxB_MIN_TIMES_INT16 , GxB_MAX_TIMES_INT16 , GxB_PLUS_TIMES_INT16 , GxB_TIMES_TIMES_INT16 , GxB_ANY_TIMES_INT16 ,
GxB_MIN_TIMES_INT32 , GxB_MAX_TIMES_INT32 , GxB_PLUS_TIMES_INT32 , GxB_TIMES_TIMES_INT32 , GxB_ANY_TIMES_INT32 ,
GxB_MIN_TIMES_INT64 , GxB_MAX_TIMES_INT64 , GxB_PLUS_TIMES_INT64 , GxB_TIMES_TIMES_INT64 , GxB_ANY_TIMES_INT64 ,
GxB_MIN_TIMES_UINT8 , GxB_MAX_TIMES_UINT8 , GxB_PLUS_TIMES_UINT8 , GxB_TIMES_TIMES_UINT8 , GxB_ANY_TIMES_UINT8 ,
GxB_MIN_TIMES_UINT16 , GxB_MAX_TIMES_UINT16 , GxB_PLUS_TIMES_UINT16 , GxB_TIMES_TIMES_UINT16 , GxB_ANY_TIMES_UINT16 ,
GxB_MIN_TIMES_UINT32 , GxB_MAX_TIMES_UINT32 , GxB_PLUS_TIMES_UINT32 , GxB_TIMES_TIMES_UINT32 , GxB_ANY_TIMES_UINT32 ,
GxB_MIN_TIMES_UINT64 , GxB_MAX_TIMES_UINT64 , GxB_PLUS_TIMES_UINT64 , GxB_TIMES_TIMES_UINT64 , GxB_ANY_TIMES_UINT64 ,
GxB_MIN_TIMES_FP32 , GxB_MAX_TIMES_FP32 , GxB_PLUS_TIMES_FP32 , GxB_TIMES_TIMES_FP32 , GxB_ANY_TIMES_FP32 ,
GxB_MIN_TIMES_FP64 , GxB_MAX_TIMES_FP64 , GxB_PLUS_TIMES_FP64 , GxB_TIMES_TIMES_FP64 , GxB_ANY_TIMES_FP64 ,
// semirings with multiply op: z = DIV (x,y), all types x,y,z the same:
GxB_MIN_DIV_INT8 , GxB_MAX_DIV_INT8 , GxB_PLUS_DIV_INT8 , GxB_TIMES_DIV_INT8 , GxB_ANY_DIV_INT8 ,
GxB_MIN_DIV_INT16 , GxB_MAX_DIV_INT16 , GxB_PLUS_DIV_INT16 , GxB_TIMES_DIV_INT16 , GxB_ANY_DIV_INT16 ,
GxB_MIN_DIV_INT32 , GxB_MAX_DIV_INT32 , GxB_PLUS_DIV_INT32 , GxB_TIMES_DIV_INT32 , GxB_ANY_DIV_INT32 ,
GxB_MIN_DIV_INT64 , GxB_MAX_DIV_INT64 , GxB_PLUS_DIV_INT64 , GxB_TIMES_DIV_INT64 , GxB_ANY_DIV_INT64 ,
GxB_MIN_DIV_UINT8 , GxB_MAX_DIV_UINT8 , GxB_PLUS_DIV_UINT8 , GxB_TIMES_DIV_UINT8 , GxB_ANY_DIV_UINT8 ,
GxB_MIN_DIV_UINT16 , GxB_MAX_DIV_UINT16 , GxB_PLUS_DIV_UINT16 , GxB_TIMES_DIV_UINT16 , GxB_ANY_DIV_UINT16 ,
GxB_MIN_DIV_UINT32 , GxB_MAX_DIV_UINT32 , GxB_PLUS_DIV_UINT32 , GxB_TIMES_DIV_UINT32 , GxB_ANY_DIV_UINT32 ,
GxB_MIN_DIV_UINT64 , GxB_MAX_DIV_UINT64 , GxB_PLUS_DIV_UINT64 , GxB_TIMES_DIV_UINT64 , GxB_ANY_DIV_UINT64 ,
GxB_MIN_DIV_FP32 , GxB_MAX_DIV_FP32 , GxB_PLUS_DIV_FP32 , GxB_TIMES_DIV_FP32 , GxB_ANY_DIV_FP32 ,
GxB_MIN_DIV_FP64 , GxB_MAX_DIV_FP64 , GxB_PLUS_DIV_FP64 , GxB_TIMES_DIV_FP64 , GxB_ANY_DIV_FP64 ,
// semirings with multiply op: z = RDIV (x,y), all types x,y,z the same:
GxB_MIN_RDIV_INT8 , GxB_MAX_RDIV_INT8 , GxB_PLUS_RDIV_INT8 , GxB_TIMES_RDIV_INT8 , GxB_ANY_RDIV_INT8 ,
GxB_MIN_RDIV_INT16 , GxB_MAX_RDIV_INT16 , GxB_PLUS_RDIV_INT16 , GxB_TIMES_RDIV_INT16 , GxB_ANY_RDIV_INT16 ,
GxB_MIN_RDIV_INT32 , GxB_MAX_RDIV_INT32 , GxB_PLUS_RDIV_INT32 , GxB_TIMES_RDIV_INT32 , GxB_ANY_RDIV_INT32 ,
GxB_MIN_RDIV_INT64 , GxB_MAX_RDIV_INT64 , GxB_PLUS_RDIV_INT64 , GxB_TIMES_RDIV_INT64 , GxB_ANY_RDIV_INT64 ,
GxB_MIN_RDIV_UINT8 , GxB_MAX_RDIV_UINT8 , GxB_PLUS_RDIV_UINT8 , GxB_TIMES_RDIV_UINT8 , GxB_ANY_RDIV_UINT8 ,
GxB_MIN_RDIV_UINT16 , GxB_MAX_RDIV_UINT16 , GxB_PLUS_RDIV_UINT16 , GxB_TIMES_RDIV_UINT16 , GxB_ANY_RDIV_UINT16 ,
GxB_MIN_RDIV_UINT32 , GxB_MAX_RDIV_UINT32 , GxB_PLUS_RDIV_UINT32 , GxB_TIMES_RDIV_UINT32 , GxB_ANY_RDIV_UINT32 ,
GxB_MIN_RDIV_UINT64 , GxB_MAX_RDIV_UINT64 , GxB_PLUS_RDIV_UINT64 , GxB_TIMES_RDIV_UINT64 , GxB_ANY_RDIV_UINT64 ,
GxB_MIN_RDIV_FP32 , GxB_MAX_RDIV_FP32 , GxB_PLUS_RDIV_FP32 , GxB_TIMES_RDIV_FP32 , GxB_ANY_RDIV_FP32 ,
GxB_MIN_RDIV_FP64 , GxB_MAX_RDIV_FP64 , GxB_PLUS_RDIV_FP64 , GxB_TIMES_RDIV_FP64 , GxB_ANY_RDIV_FP64 ,
// semirings with multiply op: z = RMINUS (x,y), all types x,y,z the same:
GxB_MIN_RMINUS_INT8 , GxB_MAX_RMINUS_INT8 , GxB_PLUS_RMINUS_INT8 , GxB_TIMES_RMINUS_INT8 , GxB_ANY_RMINUS_INT8 ,
GxB_MIN_RMINUS_INT16 , GxB_MAX_RMINUS_INT16 , GxB_PLUS_RMINUS_INT16 , GxB_TIMES_RMINUS_INT16 , GxB_ANY_RMINUS_INT16 ,
GxB_MIN_RMINUS_INT32 , GxB_MAX_RMINUS_INT32 , GxB_PLUS_RMINUS_INT32 , GxB_TIMES_RMINUS_INT32 , GxB_ANY_RMINUS_INT32 ,
GxB_MIN_RMINUS_INT64 , GxB_MAX_RMINUS_INT64 , GxB_PLUS_RMINUS_INT64 , GxB_TIMES_RMINUS_INT64 , GxB_ANY_RMINUS_INT64 ,
GxB_MIN_RMINUS_UINT8 , GxB_MAX_RMINUS_UINT8 , GxB_PLUS_RMINUS_UINT8 , GxB_TIMES_RMINUS_UINT8 , GxB_ANY_RMINUS_UINT8 ,
GxB_MIN_RMINUS_UINT16 , GxB_MAX_RMINUS_UINT16 , GxB_PLUS_RMINUS_UINT16 , GxB_TIMES_RMINUS_UINT16, GxB_ANY_RMINUS_UINT16 ,
GxB_MIN_RMINUS_UINT32 , GxB_MAX_RMINUS_UINT32 , GxB_PLUS_RMINUS_UINT32 , GxB_TIMES_RMINUS_UINT32, GxB_ANY_RMINUS_UINT32 ,
GxB_MIN_RMINUS_UINT64 , GxB_MAX_RMINUS_UINT64 , GxB_PLUS_RMINUS_UINT64 , GxB_TIMES_RMINUS_UINT64, GxB_ANY_RMINUS_UINT64 ,
GxB_MIN_RMINUS_FP32 , GxB_MAX_RMINUS_FP32 , GxB_PLUS_RMINUS_FP32 , GxB_TIMES_RMINUS_FP32 , GxB_ANY_RMINUS_FP32 ,
GxB_MIN_RMINUS_FP64 , GxB_MAX_RMINUS_FP64 , GxB_PLUS_RMINUS_FP64 , GxB_TIMES_RMINUS_FP64 , GxB_ANY_RMINUS_FP64 ,
// semirings with multiply op: z = ISEQ (x,y), all types x,y,z the same:
GxB_MIN_ISEQ_INT8 , GxB_MAX_ISEQ_INT8 , GxB_PLUS_ISEQ_INT8 , GxB_TIMES_ISEQ_INT8 , GxB_ANY_ISEQ_INT8 ,
GxB_MIN_ISEQ_INT16 , GxB_MAX_ISEQ_INT16 , GxB_PLUS_ISEQ_INT16 , GxB_TIMES_ISEQ_INT16 , GxB_ANY_ISEQ_INT16 ,
GxB_MIN_ISEQ_INT32 , GxB_MAX_ISEQ_INT32 , GxB_PLUS_ISEQ_INT32 , GxB_TIMES_ISEQ_INT32 , GxB_ANY_ISEQ_INT32 ,
GxB_MIN_ISEQ_INT64 , GxB_MAX_ISEQ_INT64 , GxB_PLUS_ISEQ_INT64 , GxB_TIMES_ISEQ_INT64 , GxB_ANY_ISEQ_INT64 ,
GxB_MIN_ISEQ_UINT8 , GxB_MAX_ISEQ_UINT8 , GxB_PLUS_ISEQ_UINT8 , GxB_TIMES_ISEQ_UINT8 , GxB_ANY_ISEQ_UINT8 ,
GxB_MIN_ISEQ_UINT16 , GxB_MAX_ISEQ_UINT16 , GxB_PLUS_ISEQ_UINT16 , GxB_TIMES_ISEQ_UINT16 , GxB_ANY_ISEQ_UINT16 ,
GxB_MIN_ISEQ_UINT32 , GxB_MAX_ISEQ_UINT32 , GxB_PLUS_ISEQ_UINT32 , GxB_TIMES_ISEQ_UINT32 , GxB_ANY_ISEQ_UINT32 ,
GxB_MIN_ISEQ_UINT64 , GxB_MAX_ISEQ_UINT64 , GxB_PLUS_ISEQ_UINT64 , GxB_TIMES_ISEQ_UINT64 , GxB_ANY_ISEQ_UINT64 ,
GxB_MIN_ISEQ_FP32 , GxB_MAX_ISEQ_FP32 , GxB_PLUS_ISEQ_FP32 , GxB_TIMES_ISEQ_FP32 , GxB_ANY_ISEQ_FP32 ,
GxB_MIN_ISEQ_FP64 , GxB_MAX_ISEQ_FP64 , GxB_PLUS_ISEQ_FP64 , GxB_TIMES_ISEQ_FP64 , GxB_ANY_ISEQ_FP64 ,
// semirings with multiply op: z = ISNE (x,y), all types x,y,z the same:
GxB_MIN_ISNE_INT8 , GxB_MAX_ISNE_INT8 , GxB_PLUS_ISNE_INT8 , GxB_TIMES_ISNE_INT8 , GxB_ANY_ISNE_INT8 ,
GxB_MIN_ISNE_INT16 , GxB_MAX_ISNE_INT16 , GxB_PLUS_ISNE_INT16 , GxB_TIMES_ISNE_INT16 , GxB_ANY_ISNE_INT16 ,
GxB_MIN_ISNE_INT32 , GxB_MAX_ISNE_INT32 , GxB_PLUS_ISNE_INT32 , GxB_TIMES_ISNE_INT32 , GxB_ANY_ISNE_INT32 ,
GxB_MIN_ISNE_INT64 , GxB_MAX_ISNE_INT64 , GxB_PLUS_ISNE_INT64 , GxB_TIMES_ISNE_INT64 , GxB_ANY_ISNE_INT64 ,
GxB_MIN_ISNE_UINT8 , GxB_MAX_ISNE_UINT8 , GxB_PLUS_ISNE_UINT8 , GxB_TIMES_ISNE_UINT8 , GxB_ANY_ISNE_UINT8 ,
GxB_MIN_ISNE_UINT16 , GxB_MAX_ISNE_UINT16 , GxB_PLUS_ISNE_UINT16 , GxB_TIMES_ISNE_UINT16 , GxB_ANY_ISNE_UINT16 ,
GxB_MIN_ISNE_UINT32 , GxB_MAX_ISNE_UINT32 , GxB_PLUS_ISNE_UINT32 , GxB_TIMES_ISNE_UINT32 , GxB_ANY_ISNE_UINT32 ,
GxB_MIN_ISNE_UINT64 , GxB_MAX_ISNE_UINT64 , GxB_PLUS_ISNE_UINT64 , GxB_TIMES_ISNE_UINT64 , GxB_ANY_ISNE_UINT64 ,
GxB_MIN_ISNE_FP32 , GxB_MAX_ISNE_FP32 , GxB_PLUS_ISNE_FP32 , GxB_TIMES_ISNE_FP32 , GxB_ANY_ISNE_FP32 ,
GxB_MIN_ISNE_FP64 , GxB_MAX_ISNE_FP64 , GxB_PLUS_ISNE_FP64 , GxB_TIMES_ISNE_FP64 , GxB_ANY_ISNE_FP64 ,
// semirings with multiply op: z = ISGT (x,y), all types x,y,z the same:
GxB_MIN_ISGT_INT8 , GxB_MAX_ISGT_INT8 , GxB_PLUS_ISGT_INT8 , GxB_TIMES_ISGT_INT8 , GxB_ANY_ISGT_INT8 ,
GxB_MIN_ISGT_INT16 , GxB_MAX_ISGT_INT16 , GxB_PLUS_ISGT_INT16 , GxB_TIMES_ISGT_INT16 , GxB_ANY_ISGT_INT16 ,
GxB_MIN_ISGT_INT32 , GxB_MAX_ISGT_INT32 , GxB_PLUS_ISGT_INT32 , GxB_TIMES_ISGT_INT32 , GxB_ANY_ISGT_INT32 ,
GxB_MIN_ISGT_INT64 , GxB_MAX_ISGT_INT64 , GxB_PLUS_ISGT_INT64 , GxB_TIMES_ISGT_INT64 , GxB_ANY_ISGT_INT64 ,
GxB_MIN_ISGT_UINT8 , GxB_MAX_ISGT_UINT8 , GxB_PLUS_ISGT_UINT8 , GxB_TIMES_ISGT_UINT8 , GxB_ANY_ISGT_UINT8 ,
GxB_MIN_ISGT_UINT16 , GxB_MAX_ISGT_UINT16 , GxB_PLUS_ISGT_UINT16 , GxB_TIMES_ISGT_UINT16 , GxB_ANY_ISGT_UINT16 ,
GxB_MIN_ISGT_UINT32 , GxB_MAX_ISGT_UINT32 , GxB_PLUS_ISGT_UINT32 , GxB_TIMES_ISGT_UINT32 , GxB_ANY_ISGT_UINT32 ,
GxB_MIN_ISGT_UINT64 , GxB_MAX_ISGT_UINT64 , GxB_PLUS_ISGT_UINT64 , GxB_TIMES_ISGT_UINT64 , GxB_ANY_ISGT_UINT64 ,
GxB_MIN_ISGT_FP32 , GxB_MAX_ISGT_FP32 , GxB_PLUS_ISGT_FP32 , GxB_TIMES_ISGT_FP32 , GxB_ANY_ISGT_FP32 ,
GxB_MIN_ISGT_FP64 , GxB_MAX_ISGT_FP64 , GxB_PLUS_ISGT_FP64 , GxB_TIMES_ISGT_FP64 , GxB_ANY_ISGT_FP64 ,
// semirings with multiply op: z = ISLT (x,y), all types x,y,z the same:
GxB_MIN_ISLT_INT8 , GxB_MAX_ISLT_INT8 , GxB_PLUS_ISLT_INT8 , GxB_TIMES_ISLT_INT8 , GxB_ANY_ISLT_INT8 ,
GxB_MIN_ISLT_INT16 , GxB_MAX_ISLT_INT16 , GxB_PLUS_ISLT_INT16 , GxB_TIMES_ISLT_INT16 , GxB_ANY_ISLT_INT16 ,
GxB_MIN_ISLT_INT32 , GxB_MAX_ISLT_INT32 , GxB_PLUS_ISLT_INT32 , GxB_TIMES_ISLT_INT32 , GxB_ANY_ISLT_INT32 ,
GxB_MIN_ISLT_INT64 , GxB_MAX_ISLT_INT64 , GxB_PLUS_ISLT_INT64 , GxB_TIMES_ISLT_INT64 , GxB_ANY_ISLT_INT64 ,
GxB_MIN_ISLT_UINT8 , GxB_MAX_ISLT_UINT8 , GxB_PLUS_ISLT_UINT8 , GxB_TIMES_ISLT_UINT8 , GxB_ANY_ISLT_UINT8 ,
GxB_MIN_ISLT_UINT16 , GxB_MAX_ISLT_UINT16 , GxB_PLUS_ISLT_UINT16 , GxB_TIMES_ISLT_UINT16 , GxB_ANY_ISLT_UINT16 ,
GxB_MIN_ISLT_UINT32 , GxB_MAX_ISLT_UINT32 , GxB_PLUS_ISLT_UINT32 , GxB_TIMES_ISLT_UINT32 , GxB_ANY_ISLT_UINT32 ,
GxB_MIN_ISLT_UINT64 , GxB_MAX_ISLT_UINT64 , GxB_PLUS_ISLT_UINT64 , GxB_TIMES_ISLT_UINT64 , GxB_ANY_ISLT_UINT64 ,
GxB_MIN_ISLT_FP32 , GxB_MAX_ISLT_FP32 , GxB_PLUS_ISLT_FP32 , GxB_TIMES_ISLT_FP32 , GxB_ANY_ISLT_FP32 ,
GxB_MIN_ISLT_FP64 , GxB_MAX_ISLT_FP64 , GxB_PLUS_ISLT_FP64 , GxB_TIMES_ISLT_FP64 , GxB_ANY_ISLT_FP64 ,
// semirings with multiply op: z = ISGE (x,y), all types x,y,z the same:
GxB_MIN_ISGE_INT8 , GxB_MAX_ISGE_INT8 , GxB_PLUS_ISGE_INT8 , GxB_TIMES_ISGE_INT8 , GxB_ANY_ISGE_INT8 ,
GxB_MIN_ISGE_INT16 , GxB_MAX_ISGE_INT16 , GxB_PLUS_ISGE_INT16 , GxB_TIMES_ISGE_INT16 , GxB_ANY_ISGE_INT16 ,
GxB_MIN_ISGE_INT32 , GxB_MAX_ISGE_INT32 , GxB_PLUS_ISGE_INT32 , GxB_TIMES_ISGE_INT32 , GxB_ANY_ISGE_INT32 ,
GxB_MIN_ISGE_INT64 , GxB_MAX_ISGE_INT64 , GxB_PLUS_ISGE_INT64 , GxB_TIMES_ISGE_INT64 , GxB_ANY_ISGE_INT64 ,
GxB_MIN_ISGE_UINT8 , GxB_MAX_ISGE_UINT8 , GxB_PLUS_ISGE_UINT8 , GxB_TIMES_ISGE_UINT8 , GxB_ANY_ISGE_UINT8 ,
GxB_MIN_ISGE_UINT16 , GxB_MAX_ISGE_UINT16 , GxB_PLUS_ISGE_UINT16 , GxB_TIMES_ISGE_UINT16 , GxB_ANY_ISGE_UINT16 ,
GxB_MIN_ISGE_UINT32 , GxB_MAX_ISGE_UINT32 , GxB_PLUS_ISGE_UINT32 , GxB_TIMES_ISGE_UINT32 , GxB_ANY_ISGE_UINT32 ,
GxB_MIN_ISGE_UINT64 , GxB_MAX_ISGE_UINT64 , GxB_PLUS_ISGE_UINT64 , GxB_TIMES_ISGE_UINT64 , GxB_ANY_ISGE_UINT64 ,
GxB_MIN_ISGE_FP32 , GxB_MAX_ISGE_FP32 , GxB_PLUS_ISGE_FP32 , GxB_TIMES_ISGE_FP32 , GxB_ANY_ISGE_FP32 ,
GxB_MIN_ISGE_FP64 , GxB_MAX_ISGE_FP64 , GxB_PLUS_ISGE_FP64 , GxB_TIMES_ISGE_FP64 , GxB_ANY_ISGE_FP64 ,
// semirings with multiply op: z = ISLE (x,y), all types x,y,z the same:
GxB_MIN_ISLE_INT8 , GxB_MAX_ISLE_INT8 , GxB_PLUS_ISLE_INT8 , GxB_TIMES_ISLE_INT8 , GxB_ANY_ISLE_INT8 ,
GxB_MIN_ISLE_INT16 , GxB_MAX_ISLE_INT16 , GxB_PLUS_ISLE_INT16 , GxB_TIMES_ISLE_INT16 , GxB_ANY_ISLE_INT16 ,
GxB_MIN_ISLE_INT32 , GxB_MAX_ISLE_INT32 , GxB_PLUS_ISLE_INT32 , GxB_TIMES_ISLE_INT32 , GxB_ANY_ISLE_INT32 ,
GxB_MIN_ISLE_INT64 , GxB_MAX_ISLE_INT64 , GxB_PLUS_ISLE_INT64 , GxB_TIMES_ISLE_INT64 , GxB_ANY_ISLE_INT64 ,
GxB_MIN_ISLE_UINT8 , GxB_MAX_ISLE_UINT8 , GxB_PLUS_ISLE_UINT8 , GxB_TIMES_ISLE_UINT8 , GxB_ANY_ISLE_UINT8 ,
GxB_MIN_ISLE_UINT16 , GxB_MAX_ISLE_UINT16 , GxB_PLUS_ISLE_UINT16 , GxB_TIMES_ISLE_UINT16 , GxB_ANY_ISLE_UINT16 ,
GxB_MIN_ISLE_UINT32 , GxB_MAX_ISLE_UINT32 , GxB_PLUS_ISLE_UINT32 , GxB_TIMES_ISLE_UINT32 , GxB_ANY_ISLE_UINT32 ,
GxB_MIN_ISLE_UINT64 , GxB_MAX_ISLE_UINT64 , GxB_PLUS_ISLE_UINT64 , GxB_TIMES_ISLE_UINT64 , GxB_ANY_ISLE_UINT64 ,
GxB_MIN_ISLE_FP32 , GxB_MAX_ISLE_FP32 , GxB_PLUS_ISLE_FP32 , GxB_TIMES_ISLE_FP32 , GxB_ANY_ISLE_FP32 ,
GxB_MIN_ISLE_FP64 , GxB_MAX_ISLE_FP64 , GxB_PLUS_ISLE_FP64 , GxB_TIMES_ISLE_FP64 , GxB_ANY_ISLE_FP64 ,
// semirings with multiply op: z = LOR (x,y), all types x,y,z the same:
GxB_MIN_LOR_INT8 , GxB_MAX_LOR_INT8 , GxB_PLUS_LOR_INT8 , GxB_TIMES_LOR_INT8 , GxB_ANY_LOR_INT8 ,
GxB_MIN_LOR_INT16 , GxB_MAX_LOR_INT16 , GxB_PLUS_LOR_INT16 , GxB_TIMES_LOR_INT16 , GxB_ANY_LOR_INT16 ,
GxB_MIN_LOR_INT32 , GxB_MAX_LOR_INT32 , GxB_PLUS_LOR_INT32 , GxB_TIMES_LOR_INT32 , GxB_ANY_LOR_INT32 ,
GxB_MIN_LOR_INT64 , GxB_MAX_LOR_INT64 , GxB_PLUS_LOR_INT64 , GxB_TIMES_LOR_INT64 , GxB_ANY_LOR_INT64 ,
GxB_MIN_LOR_UINT8 , GxB_MAX_LOR_UINT8 , GxB_PLUS_LOR_UINT8 , GxB_TIMES_LOR_UINT8 , GxB_ANY_LOR_UINT8 ,
GxB_MIN_LOR_UINT16 , GxB_MAX_LOR_UINT16 , GxB_PLUS_LOR_UINT16 , GxB_TIMES_LOR_UINT16 , GxB_ANY_LOR_UINT16 ,
GxB_MIN_LOR_UINT32 , GxB_MAX_LOR_UINT32 , GxB_PLUS_LOR_UINT32 , GxB_TIMES_LOR_UINT32 , GxB_ANY_LOR_UINT32 ,
GxB_MIN_LOR_UINT64 , GxB_MAX_LOR_UINT64 , GxB_PLUS_LOR_UINT64 , GxB_TIMES_LOR_UINT64 , GxB_ANY_LOR_UINT64 ,
GxB_MIN_LOR_FP32 , GxB_MAX_LOR_FP32 , GxB_PLUS_LOR_FP32 , GxB_TIMES_LOR_FP32 , GxB_ANY_LOR_FP32 ,
GxB_MIN_LOR_FP64 , GxB_MAX_LOR_FP64 , GxB_PLUS_LOR_FP64 , GxB_TIMES_LOR_FP64 , GxB_ANY_LOR_FP64 ,
// semirings with multiply op: z = LAND (x,y), all types x,y,z the same:
GxB_MIN_LAND_INT8 , GxB_MAX_LAND_INT8 , GxB_PLUS_LAND_INT8 , GxB_TIMES_LAND_INT8 , GxB_ANY_LAND_INT8 ,
GxB_MIN_LAND_INT16 , GxB_MAX_LAND_INT16 , GxB_PLUS_LAND_INT16 , GxB_TIMES_LAND_INT16 , GxB_ANY_LAND_INT16 ,
GxB_MIN_LAND_INT32 , GxB_MAX_LAND_INT32 , GxB_PLUS_LAND_INT32 , GxB_TIMES_LAND_INT32 , GxB_ANY_LAND_INT32 ,
GxB_MIN_LAND_INT64 , GxB_MAX_LAND_INT64 , GxB_PLUS_LAND_INT64 , GxB_TIMES_LAND_INT64 , GxB_ANY_LAND_INT64 ,
GxB_MIN_LAND_UINT8 , GxB_MAX_LAND_UINT8 , GxB_PLUS_LAND_UINT8 , GxB_TIMES_LAND_UINT8 , GxB_ANY_LAND_UINT8 ,
GxB_MIN_LAND_UINT16 , GxB_MAX_LAND_UINT16 , GxB_PLUS_LAND_UINT16 , GxB_TIMES_LAND_UINT16 , GxB_ANY_LAND_UINT16 ,
GxB_MIN_LAND_UINT32 , GxB_MAX_LAND_UINT32 , GxB_PLUS_LAND_UINT32 , GxB_TIMES_LAND_UINT32 , GxB_ANY_LAND_UINT32 ,
GxB_MIN_LAND_UINT64 , GxB_MAX_LAND_UINT64 , GxB_PLUS_LAND_UINT64 , GxB_TIMES_LAND_UINT64 , GxB_ANY_LAND_UINT64 ,
GxB_MIN_LAND_FP32 , GxB_MAX_LAND_FP32 , GxB_PLUS_LAND_FP32 , GxB_TIMES_LAND_FP32 , GxB_ANY_LAND_FP32 ,
GxB_MIN_LAND_FP64 , GxB_MAX_LAND_FP64 , GxB_PLUS_LAND_FP64 , GxB_TIMES_LAND_FP64 , GxB_ANY_LAND_FP64 ,
// semirings with multiply op: z = LXOR (x,y), all types x,y,z the same:
GxB_MIN_LXOR_INT8 , GxB_MAX_LXOR_INT8 , GxB_PLUS_LXOR_INT8 , GxB_TIMES_LXOR_INT8 , GxB_ANY_LXOR_INT8 ,
GxB_MIN_LXOR_INT16 , GxB_MAX_LXOR_INT16 , GxB_PLUS_LXOR_INT16 , GxB_TIMES_LXOR_INT16 , GxB_ANY_LXOR_INT16 ,
GxB_MIN_LXOR_INT32 , GxB_MAX_LXOR_INT32 , GxB_PLUS_LXOR_INT32 , GxB_TIMES_LXOR_INT32 , GxB_ANY_LXOR_INT32 ,
GxB_MIN_LXOR_INT64 , GxB_MAX_LXOR_INT64 , GxB_PLUS_LXOR_INT64 , GxB_TIMES_LXOR_INT64 , GxB_ANY_LXOR_INT64 ,
GxB_MIN_LXOR_UINT8 , GxB_MAX_LXOR_UINT8 , GxB_PLUS_LXOR_UINT8 , GxB_TIMES_LXOR_UINT8 , GxB_ANY_LXOR_UINT8 ,
GxB_MIN_LXOR_UINT16 , GxB_MAX_LXOR_UINT16 , GxB_PLUS_LXOR_UINT16 , GxB_TIMES_LXOR_UINT16 , GxB_ANY_LXOR_UINT16 ,
GxB_MIN_LXOR_UINT32 , GxB_MAX_LXOR_UINT32 , GxB_PLUS_LXOR_UINT32 , GxB_TIMES_LXOR_UINT32 , GxB_ANY_LXOR_UINT32 ,
GxB_MIN_LXOR_UINT64 , GxB_MAX_LXOR_UINT64 , GxB_PLUS_LXOR_UINT64 , GxB_TIMES_LXOR_UINT64 , GxB_ANY_LXOR_UINT64 ,
GxB_MIN_LXOR_FP32 , GxB_MAX_LXOR_FP32 , GxB_PLUS_LXOR_FP32 , GxB_TIMES_LXOR_FP32 , GxB_ANY_LXOR_FP32 ,
GxB_MIN_LXOR_FP64 , GxB_MAX_LXOR_FP64 , GxB_PLUS_LXOR_FP64 , GxB_TIMES_LXOR_FP64 , GxB_ANY_LXOR_FP64 ,
//------------------------------------------------------------------------------
// 300 semirings with a comparator TxT -> bool, where T is non-Boolean
//------------------------------------------------------------------------------
// In the 4th column the GxB_EQ_*_* semirings could also be called
// GxB_LXNOR_*_*, since the EQ and LXNOR boolean operators are identical
// but those names are not included.
// semirings with multiply op: z = EQ (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_EQ_INT8 , GxB_LAND_EQ_INT8 , GxB_LXOR_EQ_INT8 , GxB_EQ_EQ_INT8 , GxB_ANY_EQ_INT8 ,
GxB_LOR_EQ_INT16 , GxB_LAND_EQ_INT16 , GxB_LXOR_EQ_INT16 , GxB_EQ_EQ_INT16 , GxB_ANY_EQ_INT16 ,
GxB_LOR_EQ_INT32 , GxB_LAND_EQ_INT32 , GxB_LXOR_EQ_INT32 , GxB_EQ_EQ_INT32 , GxB_ANY_EQ_INT32 ,
GxB_LOR_EQ_INT64 , GxB_LAND_EQ_INT64 , GxB_LXOR_EQ_INT64 , GxB_EQ_EQ_INT64 , GxB_ANY_EQ_INT64 ,
GxB_LOR_EQ_UINT8 , GxB_LAND_EQ_UINT8 , GxB_LXOR_EQ_UINT8 , GxB_EQ_EQ_UINT8 , GxB_ANY_EQ_UINT8 ,
GxB_LOR_EQ_UINT16 , GxB_LAND_EQ_UINT16 , GxB_LXOR_EQ_UINT16 , GxB_EQ_EQ_UINT16 , GxB_ANY_EQ_UINT16 ,
GxB_LOR_EQ_UINT32 , GxB_LAND_EQ_UINT32 , GxB_LXOR_EQ_UINT32 , GxB_EQ_EQ_UINT32 , GxB_ANY_EQ_UINT32 ,
GxB_LOR_EQ_UINT64 , GxB_LAND_EQ_UINT64 , GxB_LXOR_EQ_UINT64 , GxB_EQ_EQ_UINT64 , GxB_ANY_EQ_UINT64 ,
GxB_LOR_EQ_FP32 , GxB_LAND_EQ_FP32 , GxB_LXOR_EQ_FP32 , GxB_EQ_EQ_FP32 , GxB_ANY_EQ_FP32 ,
GxB_LOR_EQ_FP64 , GxB_LAND_EQ_FP64 , GxB_LXOR_EQ_FP64 , GxB_EQ_EQ_FP64 , GxB_ANY_EQ_FP64 ,
// semirings with multiply op: z = NE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_NE_INT8 , GxB_LAND_NE_INT8 , GxB_LXOR_NE_INT8 , GxB_EQ_NE_INT8 , GxB_ANY_NE_INT8 ,
GxB_LOR_NE_INT16 , GxB_LAND_NE_INT16 , GxB_LXOR_NE_INT16 , GxB_EQ_NE_INT16 , GxB_ANY_NE_INT16 ,
GxB_LOR_NE_INT32 , GxB_LAND_NE_INT32 , GxB_LXOR_NE_INT32 , GxB_EQ_NE_INT32 , GxB_ANY_NE_INT32 ,
GxB_LOR_NE_INT64 , GxB_LAND_NE_INT64 , GxB_LXOR_NE_INT64 , GxB_EQ_NE_INT64 , GxB_ANY_NE_INT64 ,
GxB_LOR_NE_UINT8 , GxB_LAND_NE_UINT8 , GxB_LXOR_NE_UINT8 , GxB_EQ_NE_UINT8 , GxB_ANY_NE_UINT8 ,
GxB_LOR_NE_UINT16 , GxB_LAND_NE_UINT16 , GxB_LXOR_NE_UINT16 , GxB_EQ_NE_UINT16 , GxB_ANY_NE_UINT16 ,
GxB_LOR_NE_UINT32 , GxB_LAND_NE_UINT32 , GxB_LXOR_NE_UINT32 , GxB_EQ_NE_UINT32 , GxB_ANY_NE_UINT32 ,
GxB_LOR_NE_UINT64 , GxB_LAND_NE_UINT64 , GxB_LXOR_NE_UINT64 , GxB_EQ_NE_UINT64 , GxB_ANY_NE_UINT64 ,
GxB_LOR_NE_FP32 , GxB_LAND_NE_FP32 , GxB_LXOR_NE_FP32 , GxB_EQ_NE_FP32 , GxB_ANY_NE_FP32 ,
GxB_LOR_NE_FP64 , GxB_LAND_NE_FP64 , GxB_LXOR_NE_FP64 , GxB_EQ_NE_FP64 , GxB_ANY_NE_FP64 ,
// semirings with multiply op: z = GT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GT_INT8 , GxB_LAND_GT_INT8 , GxB_LXOR_GT_INT8 , GxB_EQ_GT_INT8 , GxB_ANY_GT_INT8 ,
GxB_LOR_GT_INT16 , GxB_LAND_GT_INT16 , GxB_LXOR_GT_INT16 , GxB_EQ_GT_INT16 , GxB_ANY_GT_INT16 ,
GxB_LOR_GT_INT32 , GxB_LAND_GT_INT32 , GxB_LXOR_GT_INT32 , GxB_EQ_GT_INT32 , GxB_ANY_GT_INT32 ,
GxB_LOR_GT_INT64 , GxB_LAND_GT_INT64 , GxB_LXOR_GT_INT64 , GxB_EQ_GT_INT64 , GxB_ANY_GT_INT64 ,
GxB_LOR_GT_UINT8 , GxB_LAND_GT_UINT8 , GxB_LXOR_GT_UINT8 , GxB_EQ_GT_UINT8 , GxB_ANY_GT_UINT8 ,
GxB_LOR_GT_UINT16 , GxB_LAND_GT_UINT16 , GxB_LXOR_GT_UINT16 , GxB_EQ_GT_UINT16 , GxB_ANY_GT_UINT16 ,
GxB_LOR_GT_UINT32 , GxB_LAND_GT_UINT32 , GxB_LXOR_GT_UINT32 , GxB_EQ_GT_UINT32 , GxB_ANY_GT_UINT32 ,
GxB_LOR_GT_UINT64 , GxB_LAND_GT_UINT64 , GxB_LXOR_GT_UINT64 , GxB_EQ_GT_UINT64 , GxB_ANY_GT_UINT64 ,
GxB_LOR_GT_FP32 , GxB_LAND_GT_FP32 , GxB_LXOR_GT_FP32 , GxB_EQ_GT_FP32 , GxB_ANY_GT_FP32 ,
GxB_LOR_GT_FP64 , GxB_LAND_GT_FP64 , GxB_LXOR_GT_FP64 , GxB_EQ_GT_FP64 , GxB_ANY_GT_FP64 ,
// semirings with multiply op: z = LT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LT_INT8 , GxB_LAND_LT_INT8 , GxB_LXOR_LT_INT8 , GxB_EQ_LT_INT8 , GxB_ANY_LT_INT8 ,
GxB_LOR_LT_INT16 , GxB_LAND_LT_INT16 , GxB_LXOR_LT_INT16 , GxB_EQ_LT_INT16 , GxB_ANY_LT_INT16 ,
GxB_LOR_LT_INT32 , GxB_LAND_LT_INT32 , GxB_LXOR_LT_INT32 , GxB_EQ_LT_INT32 , GxB_ANY_LT_INT32 ,
GxB_LOR_LT_INT64 , GxB_LAND_LT_INT64 , GxB_LXOR_LT_INT64 , GxB_EQ_LT_INT64 , GxB_ANY_LT_INT64 ,
GxB_LOR_LT_UINT8 , GxB_LAND_LT_UINT8 , GxB_LXOR_LT_UINT8 , GxB_EQ_LT_UINT8 , GxB_ANY_LT_UINT8 ,
GxB_LOR_LT_UINT16 , GxB_LAND_LT_UINT16 , GxB_LXOR_LT_UINT16 , GxB_EQ_LT_UINT16 , GxB_ANY_LT_UINT16 ,
GxB_LOR_LT_UINT32 , GxB_LAND_LT_UINT32 , GxB_LXOR_LT_UINT32 , GxB_EQ_LT_UINT32 , GxB_ANY_LT_UINT32 ,
GxB_LOR_LT_UINT64 , GxB_LAND_LT_UINT64 , GxB_LXOR_LT_UINT64 , GxB_EQ_LT_UINT64 , GxB_ANY_LT_UINT64 ,
GxB_LOR_LT_FP32 , GxB_LAND_LT_FP32 , GxB_LXOR_LT_FP32 , GxB_EQ_LT_FP32 , GxB_ANY_LT_FP32 ,
GxB_LOR_LT_FP64 , GxB_LAND_LT_FP64 , GxB_LXOR_LT_FP64 , GxB_EQ_LT_FP64 , GxB_ANY_LT_FP64 ,
// semirings with multiply op: z = GE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GE_INT8 , GxB_LAND_GE_INT8 , GxB_LXOR_GE_INT8 , GxB_EQ_GE_INT8 , GxB_ANY_GE_INT8 ,
GxB_LOR_GE_INT16 , GxB_LAND_GE_INT16 , GxB_LXOR_GE_INT16 , GxB_EQ_GE_INT16 , GxB_ANY_GE_INT16 ,
GxB_LOR_GE_INT32 , GxB_LAND_GE_INT32 , GxB_LXOR_GE_INT32 , GxB_EQ_GE_INT32 , GxB_ANY_GE_INT32 ,
GxB_LOR_GE_INT64 , GxB_LAND_GE_INT64 , GxB_LXOR_GE_INT64 , GxB_EQ_GE_INT64 , GxB_ANY_GE_INT64 ,
GxB_LOR_GE_UINT8 , GxB_LAND_GE_UINT8 , GxB_LXOR_GE_UINT8 , GxB_EQ_GE_UINT8 , GxB_ANY_GE_UINT8 ,
GxB_LOR_GE_UINT16 , GxB_LAND_GE_UINT16 , GxB_LXOR_GE_UINT16 , GxB_EQ_GE_UINT16 , GxB_ANY_GE_UINT16 ,
GxB_LOR_GE_UINT32 , GxB_LAND_GE_UINT32 , GxB_LXOR_GE_UINT32 , GxB_EQ_GE_UINT32 , GxB_ANY_GE_UINT32 ,
GxB_LOR_GE_UINT64 , GxB_LAND_GE_UINT64 , GxB_LXOR_GE_UINT64 , GxB_EQ_GE_UINT64 , GxB_ANY_GE_UINT64 ,
GxB_LOR_GE_FP32 , GxB_LAND_GE_FP32 , GxB_LXOR_GE_FP32 , GxB_EQ_GE_FP32 , GxB_ANY_GE_FP32 ,
GxB_LOR_GE_FP64 , GxB_LAND_GE_FP64 , GxB_LXOR_GE_FP64 , GxB_EQ_GE_FP64 , GxB_ANY_GE_FP64 ,
// semirings with multiply op: z = LE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LE_INT8 , GxB_LAND_LE_INT8 , GxB_LXOR_LE_INT8 , GxB_EQ_LE_INT8 , GxB_ANY_LE_INT8 ,
GxB_LOR_LE_INT16 , GxB_LAND_LE_INT16 , GxB_LXOR_LE_INT16 , GxB_EQ_LE_INT16 , GxB_ANY_LE_INT16 ,
GxB_LOR_LE_INT32 , GxB_LAND_LE_INT32 , GxB_LXOR_LE_INT32 , GxB_EQ_LE_INT32 , GxB_ANY_LE_INT32 ,
GxB_LOR_LE_INT64 , GxB_LAND_LE_INT64 , GxB_LXOR_LE_INT64 , GxB_EQ_LE_INT64 , GxB_ANY_LE_INT64 ,
GxB_LOR_LE_UINT8 , GxB_LAND_LE_UINT8 , GxB_LXOR_LE_UINT8 , GxB_EQ_LE_UINT8 , GxB_ANY_LE_UINT8 ,
GxB_LOR_LE_UINT16 , GxB_LAND_LE_UINT16 , GxB_LXOR_LE_UINT16 , GxB_EQ_LE_UINT16 , GxB_ANY_LE_UINT16 ,
GxB_LOR_LE_UINT32 , GxB_LAND_LE_UINT32 , GxB_LXOR_LE_UINT32 , GxB_EQ_LE_UINT32 , GxB_ANY_LE_UINT32 ,
GxB_LOR_LE_UINT64 , GxB_LAND_LE_UINT64 , GxB_LXOR_LE_UINT64 , GxB_EQ_LE_UINT64 , GxB_ANY_LE_UINT64 ,
GxB_LOR_LE_FP32 , GxB_LAND_LE_FP32 , GxB_LXOR_LE_FP32 , GxB_EQ_LE_FP32 , GxB_ANY_LE_FP32 ,
GxB_LOR_LE_FP64 , GxB_LAND_LE_FP64 , GxB_LXOR_LE_FP64 , GxB_EQ_LE_FP64 , GxB_ANY_LE_FP64 ,
//------------------------------------------------------------------------------
// 55 semirings with purely Boolean types, bool x bool -> bool
//------------------------------------------------------------------------------
// Note that lor_pair, land_pair, and eq_pair are all identical to any_pair.
// These 3 are marked below. GxB_EQ_*_BOOL could be called
// GxB_LXNOR_*_BOOL, and GxB_*_EQ_BOOL could be called GxB_*_LXNOR_BOOL,
// but those names are not included.
// purely boolean semirings in the form GxB_(add monoid)_(multiply operator)_BOOL:
GxB_LOR_FIRST_BOOL , GxB_LAND_FIRST_BOOL , GxB_LXOR_FIRST_BOOL , GxB_EQ_FIRST_BOOL , GxB_ANY_FIRST_BOOL ,
GxB_LOR_SECOND_BOOL , GxB_LAND_SECOND_BOOL , GxB_LXOR_SECOND_BOOL , GxB_EQ_SECOND_BOOL , GxB_ANY_SECOND_BOOL ,
GxB_LOR_PAIR_BOOL/**/ , GxB_LAND_PAIR_BOOL/**/ , GxB_LXOR_PAIR_BOOL , GxB_EQ_PAIR_BOOL/**/ , GxB_ANY_PAIR_BOOL ,
GxB_LOR_LOR_BOOL , GxB_LAND_LOR_BOOL , GxB_LXOR_LOR_BOOL , GxB_EQ_LOR_BOOL , GxB_ANY_LOR_BOOL ,
GxB_LOR_LAND_BOOL , GxB_LAND_LAND_BOOL , GxB_LXOR_LAND_BOOL , GxB_EQ_LAND_BOOL , GxB_ANY_LAND_BOOL ,
GxB_LOR_LXOR_BOOL , GxB_LAND_LXOR_BOOL , GxB_LXOR_LXOR_BOOL , GxB_EQ_LXOR_BOOL , GxB_ANY_LXOR_BOOL ,
GxB_LOR_EQ_BOOL , GxB_LAND_EQ_BOOL , GxB_LXOR_EQ_BOOL , GxB_EQ_EQ_BOOL , GxB_ANY_EQ_BOOL ,
GxB_LOR_GT_BOOL , GxB_LAND_GT_BOOL , GxB_LXOR_GT_BOOL , GxB_EQ_GT_BOOL , GxB_ANY_GT_BOOL ,
GxB_LOR_LT_BOOL , GxB_LAND_LT_BOOL , GxB_LXOR_LT_BOOL , GxB_EQ_LT_BOOL , GxB_ANY_LT_BOOL ,
GxB_LOR_GE_BOOL , GxB_LAND_GE_BOOL , GxB_LXOR_GE_BOOL , GxB_EQ_GE_BOOL , GxB_ANY_GE_BOOL ,
GxB_LOR_LE_BOOL , GxB_LAND_LE_BOOL , GxB_LXOR_LE_BOOL , GxB_EQ_LE_BOOL , GxB_ANY_LE_BOOL ,
//------------------------------------------------------------------------------
// 54 complex semirings
//------------------------------------------------------------------------------
// 3 monoids (plus, times, any), 2 types (FC32 and FC64), and 9
// multiplicative operators.
// Note that times_pair is identical to any_pair.
// These 2 are marked below.
GxB_PLUS_FIRST_FC32 , GxB_TIMES_FIRST_FC32 , GxB_ANY_FIRST_FC32 ,
GxB_PLUS_FIRST_FC64 , GxB_TIMES_FIRST_FC64 , GxB_ANY_FIRST_FC64 ,
GxB_PLUS_SECOND_FC32 , GxB_TIMES_SECOND_FC32 , GxB_ANY_SECOND_FC32 ,
GxB_PLUS_SECOND_FC64 , GxB_TIMES_SECOND_FC64 , GxB_ANY_SECOND_FC64 ,
GxB_PLUS_PAIR_FC32 , GxB_TIMES_PAIR_FC32/**/, GxB_ANY_PAIR_FC32 ,
GxB_PLUS_PAIR_FC64 , GxB_TIMES_PAIR_FC64/**/, GxB_ANY_PAIR_FC64 ,
GxB_PLUS_PLUS_FC32 , GxB_TIMES_PLUS_FC32 , GxB_ANY_PLUS_FC32 ,
GxB_PLUS_PLUS_FC64 , GxB_TIMES_PLUS_FC64 , GxB_ANY_PLUS_FC64 ,
GxB_PLUS_MINUS_FC32 , GxB_TIMES_MINUS_FC32 , GxB_ANY_MINUS_FC32 ,
GxB_PLUS_MINUS_FC64 , GxB_TIMES_MINUS_FC64 , GxB_ANY_MINUS_FC64 ,
GxB_PLUS_TIMES_FC32 , GxB_TIMES_TIMES_FC32 , GxB_ANY_TIMES_FC32 ,
GxB_PLUS_TIMES_FC64 , GxB_TIMES_TIMES_FC64 , GxB_ANY_TIMES_FC64 ,
GxB_PLUS_DIV_FC32 , GxB_TIMES_DIV_FC32 , GxB_ANY_DIV_FC32 ,
GxB_PLUS_DIV_FC64 , GxB_TIMES_DIV_FC64 , GxB_ANY_DIV_FC64 ,
GxB_PLUS_RDIV_FC32 , GxB_TIMES_RDIV_FC32 , GxB_ANY_RDIV_FC32 ,
GxB_PLUS_RDIV_FC64 , GxB_TIMES_RDIV_FC64 , GxB_ANY_RDIV_FC64 ,
GxB_PLUS_RMINUS_FC32 , GxB_TIMES_RMINUS_FC32 , GxB_ANY_RMINUS_FC32 ,
GxB_PLUS_RMINUS_FC64 , GxB_TIMES_RMINUS_FC64 , GxB_ANY_RMINUS_FC64 ,
//------------------------------------------------------------------------------
// 64 bitwise semirings
//------------------------------------------------------------------------------
// monoids: (BOR, BAND, BXOR, BXNOR) x
// mult: (BOR, BAND, BXOR, BXNOR) x
// types: (UINT8, UINT16, UINT32, UINT64)
GxB_BOR_BOR_UINT8 , GxB_BOR_BOR_UINT16 , GxB_BOR_BOR_UINT32 , GxB_BOR_BOR_UINT64 ,
GxB_BOR_BAND_UINT8 , GxB_BOR_BAND_UINT16 , GxB_BOR_BAND_UINT32 , GxB_BOR_BAND_UINT64 ,
GxB_BOR_BXOR_UINT8 , GxB_BOR_BXOR_UINT16 , GxB_BOR_BXOR_UINT32 , GxB_BOR_BXOR_UINT64 ,
GxB_BOR_BXNOR_UINT8 , GxB_BOR_BXNOR_UINT16 , GxB_BOR_BXNOR_UINT32 , GxB_BOR_BXNOR_UINT64 ,
GxB_BAND_BOR_UINT8 , GxB_BAND_BOR_UINT16 , GxB_BAND_BOR_UINT32 , GxB_BAND_BOR_UINT64 ,
GxB_BAND_BAND_UINT8 , GxB_BAND_BAND_UINT16 , GxB_BAND_BAND_UINT32 , GxB_BAND_BAND_UINT64 ,
GxB_BAND_BXOR_UINT8 , GxB_BAND_BXOR_UINT16 , GxB_BAND_BXOR_UINT32 , GxB_BAND_BXOR_UINT64 ,
GxB_BAND_BXNOR_UINT8 , GxB_BAND_BXNOR_UINT16 , GxB_BAND_BXNOR_UINT32 , GxB_BAND_BXNOR_UINT64 ,
GxB_BXOR_BOR_UINT8 , GxB_BXOR_BOR_UINT16 , GxB_BXOR_BOR_UINT32 , GxB_BXOR_BOR_UINT64 ,
GxB_BXOR_BAND_UINT8 , GxB_BXOR_BAND_UINT16 , GxB_BXOR_BAND_UINT32 , GxB_BXOR_BAND_UINT64 ,
GxB_BXOR_BXOR_UINT8 , GxB_BXOR_BXOR_UINT16 , GxB_BXOR_BXOR_UINT32 , GxB_BXOR_BXOR_UINT64 ,
GxB_BXOR_BXNOR_UINT8 , GxB_BXOR_BXNOR_UINT16 , GxB_BXOR_BXNOR_UINT32 , GxB_BXOR_BXNOR_UINT64 ,
GxB_BXNOR_BOR_UINT8 , GxB_BXNOR_BOR_UINT16 , GxB_BXNOR_BOR_UINT32 , GxB_BXNOR_BOR_UINT64 ,
GxB_BXNOR_BAND_UINT8 , GxB_BXNOR_BAND_UINT16 , GxB_BXNOR_BAND_UINT32 , GxB_BXNOR_BAND_UINT64 ,
GxB_BXNOR_BXOR_UINT8 , GxB_BXNOR_BXOR_UINT16 , GxB_BXNOR_BXOR_UINT32 , GxB_BXNOR_BXOR_UINT64 ,
GxB_BXNOR_BXNOR_UINT8 , GxB_BXNOR_BXNOR_UINT16 , GxB_BXNOR_BXNOR_UINT32 , GxB_BXNOR_BXNOR_UINT64 ,
//------------------------------------------------------------------------------
// 80 positional semirings
//------------------------------------------------------------------------------
// monoids: (MIN, MAX, ANY, PLUS, TIMES) x
// mult: (FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, SECONDI, SECONDI1, SECONDJ, SECONDJ1)
// types: (INT32, INT64)
GxB_MIN_FIRSTI_INT32, GxB_MIN_FIRSTI_INT64,
GxB_MAX_FIRSTI_INT32, GxB_MAX_FIRSTI_INT64,
GxB_ANY_FIRSTI_INT32, GxB_ANY_FIRSTI_INT64,
GxB_PLUS_FIRSTI_INT32, GxB_PLUS_FIRSTI_INT64,
GxB_TIMES_FIRSTI_INT32, GxB_TIMES_FIRSTI_INT64,
GxB_MIN_FIRSTI1_INT32, GxB_MIN_FIRSTI1_INT64,
GxB_MAX_FIRSTI1_INT32, GxB_MAX_FIRSTI1_INT64,
GxB_ANY_FIRSTI1_INT32, GxB_ANY_FIRSTI1_INT64,
GxB_PLUS_FIRSTI1_INT32, GxB_PLUS_FIRSTI1_INT64,
GxB_TIMES_FIRSTI1_INT32, GxB_TIMES_FIRSTI1_INT64,
GxB_MIN_FIRSTJ_INT32, GxB_MIN_FIRSTJ_INT64,
GxB_MAX_FIRSTJ_INT32, GxB_MAX_FIRSTJ_INT64,
GxB_ANY_FIRSTJ_INT32, GxB_ANY_FIRSTJ_INT64,
GxB_PLUS_FIRSTJ_INT32, GxB_PLUS_FIRSTJ_INT64,
GxB_TIMES_FIRSTJ_INT32, GxB_TIMES_FIRSTJ_INT64,
GxB_MIN_FIRSTJ1_INT32, GxB_MIN_FIRSTJ1_INT64,
GxB_MAX_FIRSTJ1_INT32, GxB_MAX_FIRSTJ1_INT64,
GxB_ANY_FIRSTJ1_INT32, GxB_ANY_FIRSTJ1_INT64,
GxB_PLUS_FIRSTJ1_INT32, GxB_PLUS_FIRSTJ1_INT64,
GxB_TIMES_FIRSTJ1_INT32, GxB_TIMES_FIRSTJ1_INT64,
GxB_MIN_SECONDI_INT32, GxB_MIN_SECONDI_INT64,
GxB_MAX_SECONDI_INT32, GxB_MAX_SECONDI_INT64,
GxB_ANY_SECONDI_INT32, GxB_ANY_SECONDI_INT64,
GxB_PLUS_SECONDI_INT32, GxB_PLUS_SECONDI_INT64,
GxB_TIMES_SECONDI_INT32, GxB_TIMES_SECONDI_INT64,
GxB_MIN_SECONDI1_INT32, GxB_MIN_SECONDI1_INT64,
GxB_MAX_SECONDI1_INT32, GxB_MAX_SECONDI1_INT64,
GxB_ANY_SECONDI1_INT32, GxB_ANY_SECONDI1_INT64,
GxB_PLUS_SECONDI1_INT32, GxB_PLUS_SECONDI1_INT64,
GxB_TIMES_SECONDI1_INT32, GxB_TIMES_SECONDI1_INT64,
GxB_MIN_SECONDJ_INT32, GxB_MIN_SECONDJ_INT64,
GxB_MAX_SECONDJ_INT32, GxB_MAX_SECONDJ_INT64,
GxB_ANY_SECONDJ_INT32, GxB_ANY_SECONDJ_INT64,
GxB_PLUS_SECONDJ_INT32, GxB_PLUS_SECONDJ_INT64,
GxB_TIMES_SECONDJ_INT32, GxB_TIMES_SECONDJ_INT64,
GxB_MIN_SECONDJ1_INT32, GxB_MIN_SECONDJ1_INT64,
GxB_MAX_SECONDJ1_INT32, GxB_MAX_SECONDJ1_INT64,
GxB_ANY_SECONDJ1_INT32, GxB_ANY_SECONDJ1_INT64,
GxB_PLUS_SECONDJ1_INT32, GxB_PLUS_SECONDJ1_INT64,
GxB_TIMES_SECONDJ1_INT32, GxB_TIMES_SECONDJ1_INT64 ;
//------------------------------------------------------------------------------
// GrB_* semirings
//------------------------------------------------------------------------------
// The v1.3 C API for GraphBLAS adds the following 124 predefined semirings,
// with GrB_* names. They are identical to 124 GxB_* semirings defined above,
// with the same name, except that GrB_LXNOR_LOR_SEMIRING_BOOL is identical to
// GxB_EQ_LOR_BOOL (since GrB_EQ_BOOL == GrB_LXNOR). The old names are listed
// below alongside each new name; the new GrB_* names are preferred.
// 12 kinds of GrB_* semirings are available for all 10 real non-boolean types:
// PLUS_TIMES, PLUS_MIN,
// MIN_PLUS, MIN_TIMES, MIN_FIRST, MIN_SECOND, MIN_MAX,
// MAX_PLUS, MAX_TIMES, MAX_FIRST, MAX_SECOND, MAX_MIN
// and 4 semirings for boolean only:
// LOR_LAND, LAND_LOR, LXOR_LAND, LXNOR_LOR.
// GxB_* semirings corresponding to the equivalent GrB_* semiring are
// historical.
// Declarations of the 124 predefined GrB_* semiring objects (see the notes
// above).  Each entry's trailing comment names the historical GxB_* semiring
// it is identical to.
GB_PUBLIC GrB_Semiring
//--------------------------------------------------------------------------
// 20 semirings with PLUS monoids
//--------------------------------------------------------------------------
// PLUS_TIMES semirings for all 10 real, non-boolean types:
GrB_PLUS_TIMES_SEMIRING_INT8, // GxB_PLUS_TIMES_INT8
GrB_PLUS_TIMES_SEMIRING_INT16, // GxB_PLUS_TIMES_INT16
GrB_PLUS_TIMES_SEMIRING_INT32, // GxB_PLUS_TIMES_INT32
GrB_PLUS_TIMES_SEMIRING_INT64, // GxB_PLUS_TIMES_INT64
GrB_PLUS_TIMES_SEMIRING_UINT8, // GxB_PLUS_TIMES_UINT8
GrB_PLUS_TIMES_SEMIRING_UINT16, // GxB_PLUS_TIMES_UINT16
GrB_PLUS_TIMES_SEMIRING_UINT32, // GxB_PLUS_TIMES_UINT32
GrB_PLUS_TIMES_SEMIRING_UINT64, // GxB_PLUS_TIMES_UINT64
GrB_PLUS_TIMES_SEMIRING_FP32, // GxB_PLUS_TIMES_FP32
GrB_PLUS_TIMES_SEMIRING_FP64, // GxB_PLUS_TIMES_FP64
// PLUS_MIN semirings for all 10 real, non-boolean types:
GrB_PLUS_MIN_SEMIRING_INT8, // GxB_PLUS_MIN_INT8
GrB_PLUS_MIN_SEMIRING_INT16, // GxB_PLUS_MIN_INT16
GrB_PLUS_MIN_SEMIRING_INT32, // GxB_PLUS_MIN_INT32
GrB_PLUS_MIN_SEMIRING_INT64, // GxB_PLUS_MIN_INT64
GrB_PLUS_MIN_SEMIRING_UINT8, // GxB_PLUS_MIN_UINT8
GrB_PLUS_MIN_SEMIRING_UINT16, // GxB_PLUS_MIN_UINT16
GrB_PLUS_MIN_SEMIRING_UINT32, // GxB_PLUS_MIN_UINT32
GrB_PLUS_MIN_SEMIRING_UINT64, // GxB_PLUS_MIN_UINT64
GrB_PLUS_MIN_SEMIRING_FP32, // GxB_PLUS_MIN_FP32
GrB_PLUS_MIN_SEMIRING_FP64, // GxB_PLUS_MIN_FP64
//--------------------------------------------------------------------------
// 50 semirings with MIN monoids
//--------------------------------------------------------------------------
// MIN_PLUS semirings for all 10 real, non-boolean types:
GrB_MIN_PLUS_SEMIRING_INT8, // GxB_MIN_PLUS_INT8
GrB_MIN_PLUS_SEMIRING_INT16, // GxB_MIN_PLUS_INT16
GrB_MIN_PLUS_SEMIRING_INT32, // GxB_MIN_PLUS_INT32
GrB_MIN_PLUS_SEMIRING_INT64, // GxB_MIN_PLUS_INT64
GrB_MIN_PLUS_SEMIRING_UINT8, // GxB_MIN_PLUS_UINT8
GrB_MIN_PLUS_SEMIRING_UINT16, // GxB_MIN_PLUS_UINT16
GrB_MIN_PLUS_SEMIRING_UINT32, // GxB_MIN_PLUS_UINT32
GrB_MIN_PLUS_SEMIRING_UINT64, // GxB_MIN_PLUS_UINT64
GrB_MIN_PLUS_SEMIRING_FP32, // GxB_MIN_PLUS_FP32
GrB_MIN_PLUS_SEMIRING_FP64, // GxB_MIN_PLUS_FP64
// MIN_TIMES semirings for all 10 real, non-boolean types:
GrB_MIN_TIMES_SEMIRING_INT8, // GxB_MIN_TIMES_INT8
GrB_MIN_TIMES_SEMIRING_INT16, // GxB_MIN_TIMES_INT16
GrB_MIN_TIMES_SEMIRING_INT32, // GxB_MIN_TIMES_INT32
GrB_MIN_TIMES_SEMIRING_INT64, // GxB_MIN_TIMES_INT64
GrB_MIN_TIMES_SEMIRING_UINT8, // GxB_MIN_TIMES_UINT8
GrB_MIN_TIMES_SEMIRING_UINT16, // GxB_MIN_TIMES_UINT16
GrB_MIN_TIMES_SEMIRING_UINT32, // GxB_MIN_TIMES_UINT32
GrB_MIN_TIMES_SEMIRING_UINT64, // GxB_MIN_TIMES_UINT64
GrB_MIN_TIMES_SEMIRING_FP32, // GxB_MIN_TIMES_FP32
GrB_MIN_TIMES_SEMIRING_FP64, // GxB_MIN_TIMES_FP64
// MIN_FIRST semirings for all 10 real, non-boolean types:
GrB_MIN_FIRST_SEMIRING_INT8, // GxB_MIN_FIRST_INT8
GrB_MIN_FIRST_SEMIRING_INT16, // GxB_MIN_FIRST_INT16
GrB_MIN_FIRST_SEMIRING_INT32, // GxB_MIN_FIRST_INT32
GrB_MIN_FIRST_SEMIRING_INT64, // GxB_MIN_FIRST_INT64
GrB_MIN_FIRST_SEMIRING_UINT8, // GxB_MIN_FIRST_UINT8
GrB_MIN_FIRST_SEMIRING_UINT16, // GxB_MIN_FIRST_UINT16
GrB_MIN_FIRST_SEMIRING_UINT32, // GxB_MIN_FIRST_UINT32
GrB_MIN_FIRST_SEMIRING_UINT64, // GxB_MIN_FIRST_UINT64
GrB_MIN_FIRST_SEMIRING_FP32, // GxB_MIN_FIRST_FP32
GrB_MIN_FIRST_SEMIRING_FP64, // GxB_MIN_FIRST_FP64
// MIN_SECOND semirings for all 10 real, non-boolean types:
GrB_MIN_SECOND_SEMIRING_INT8, // GxB_MIN_SECOND_INT8
GrB_MIN_SECOND_SEMIRING_INT16, // GxB_MIN_SECOND_INT16
GrB_MIN_SECOND_SEMIRING_INT32, // GxB_MIN_SECOND_INT32
GrB_MIN_SECOND_SEMIRING_INT64, // GxB_MIN_SECOND_INT64
GrB_MIN_SECOND_SEMIRING_UINT8, // GxB_MIN_SECOND_UINT8
GrB_MIN_SECOND_SEMIRING_UINT16, // GxB_MIN_SECOND_UINT16
GrB_MIN_SECOND_SEMIRING_UINT32, // GxB_MIN_SECOND_UINT32
GrB_MIN_SECOND_SEMIRING_UINT64, // GxB_MIN_SECOND_UINT64
GrB_MIN_SECOND_SEMIRING_FP32, // GxB_MIN_SECOND_FP32
GrB_MIN_SECOND_SEMIRING_FP64, // GxB_MIN_SECOND_FP64
// MIN_MAX semirings for all 10 real, non-boolean types:
GrB_MIN_MAX_SEMIRING_INT8, // GxB_MIN_MAX_INT8
GrB_MIN_MAX_SEMIRING_INT16, // GxB_MIN_MAX_INT16
GrB_MIN_MAX_SEMIRING_INT32, // GxB_MIN_MAX_INT32
GrB_MIN_MAX_SEMIRING_INT64, // GxB_MIN_MAX_INT64
GrB_MIN_MAX_SEMIRING_UINT8, // GxB_MIN_MAX_UINT8
GrB_MIN_MAX_SEMIRING_UINT16, // GxB_MIN_MAX_UINT16
GrB_MIN_MAX_SEMIRING_UINT32, // GxB_MIN_MAX_UINT32
GrB_MIN_MAX_SEMIRING_UINT64, // GxB_MIN_MAX_UINT64
GrB_MIN_MAX_SEMIRING_FP32, // GxB_MIN_MAX_FP32
GrB_MIN_MAX_SEMIRING_FP64, // GxB_MIN_MAX_FP64
//--------------------------------------------------------------------------
// 50 semirings with MAX monoids
//--------------------------------------------------------------------------
// MAX_PLUS semirings for all 10 real, non-boolean types
GrB_MAX_PLUS_SEMIRING_INT8, // GxB_MAX_PLUS_INT8
GrB_MAX_PLUS_SEMIRING_INT16, // GxB_MAX_PLUS_INT16
GrB_MAX_PLUS_SEMIRING_INT32, // GxB_MAX_PLUS_INT32
GrB_MAX_PLUS_SEMIRING_INT64, // GxB_MAX_PLUS_INT64
GrB_MAX_PLUS_SEMIRING_UINT8, // GxB_MAX_PLUS_UINT8
GrB_MAX_PLUS_SEMIRING_UINT16, // GxB_MAX_PLUS_UINT16
GrB_MAX_PLUS_SEMIRING_UINT32, // GxB_MAX_PLUS_UINT32
GrB_MAX_PLUS_SEMIRING_UINT64, // GxB_MAX_PLUS_UINT64
GrB_MAX_PLUS_SEMIRING_FP32, // GxB_MAX_PLUS_FP32
GrB_MAX_PLUS_SEMIRING_FP64, // GxB_MAX_PLUS_FP64
// MAX_TIMES semirings for all 10 real, non-boolean types:
GrB_MAX_TIMES_SEMIRING_INT8, // GxB_MAX_TIMES_INT8
GrB_MAX_TIMES_SEMIRING_INT16, // GxB_MAX_TIMES_INT16
GrB_MAX_TIMES_SEMIRING_INT32, // GxB_MAX_TIMES_INT32
GrB_MAX_TIMES_SEMIRING_INT64, // GxB_MAX_TIMES_INT64
GrB_MAX_TIMES_SEMIRING_UINT8, // GxB_MAX_TIMES_UINT8
GrB_MAX_TIMES_SEMIRING_UINT16, // GxB_MAX_TIMES_UINT16
GrB_MAX_TIMES_SEMIRING_UINT32, // GxB_MAX_TIMES_UINT32
GrB_MAX_TIMES_SEMIRING_UINT64, // GxB_MAX_TIMES_UINT64
GrB_MAX_TIMES_SEMIRING_FP32, // GxB_MAX_TIMES_FP32
GrB_MAX_TIMES_SEMIRING_FP64, // GxB_MAX_TIMES_FP64
// MAX_FIRST semirings for all 10 real, non-boolean types:
GrB_MAX_FIRST_SEMIRING_INT8, // GxB_MAX_FIRST_INT8
GrB_MAX_FIRST_SEMIRING_INT16, // GxB_MAX_FIRST_INT16
GrB_MAX_FIRST_SEMIRING_INT32, // GxB_MAX_FIRST_INT32
GrB_MAX_FIRST_SEMIRING_INT64, // GxB_MAX_FIRST_INT64
GrB_MAX_FIRST_SEMIRING_UINT8, // GxB_MAX_FIRST_UINT8
GrB_MAX_FIRST_SEMIRING_UINT16, // GxB_MAX_FIRST_UINT16
GrB_MAX_FIRST_SEMIRING_UINT32, // GxB_MAX_FIRST_UINT32
GrB_MAX_FIRST_SEMIRING_UINT64, // GxB_MAX_FIRST_UINT64
GrB_MAX_FIRST_SEMIRING_FP32, // GxB_MAX_FIRST_FP32
GrB_MAX_FIRST_SEMIRING_FP64, // GxB_MAX_FIRST_FP64
// MAX_SECOND semirings for all 10 real, non-boolean types:
GrB_MAX_SECOND_SEMIRING_INT8, // GxB_MAX_SECOND_INT8
GrB_MAX_SECOND_SEMIRING_INT16, // GxB_MAX_SECOND_INT16
GrB_MAX_SECOND_SEMIRING_INT32, // GxB_MAX_SECOND_INT32
GrB_MAX_SECOND_SEMIRING_INT64, // GxB_MAX_SECOND_INT64
GrB_MAX_SECOND_SEMIRING_UINT8, // GxB_MAX_SECOND_UINT8
GrB_MAX_SECOND_SEMIRING_UINT16, // GxB_MAX_SECOND_UINT16
GrB_MAX_SECOND_SEMIRING_UINT32, // GxB_MAX_SECOND_UINT32
GrB_MAX_SECOND_SEMIRING_UINT64, // GxB_MAX_SECOND_UINT64
GrB_MAX_SECOND_SEMIRING_FP32, // GxB_MAX_SECOND_FP32
GrB_MAX_SECOND_SEMIRING_FP64, // GxB_MAX_SECOND_FP64
// MAX_MIN semirings for all 10 real, non-boolean types:
GrB_MAX_MIN_SEMIRING_INT8, // GxB_MAX_MIN_INT8
GrB_MAX_MIN_SEMIRING_INT16, // GxB_MAX_MIN_INT16
GrB_MAX_MIN_SEMIRING_INT32, // GxB_MAX_MIN_INT32
GrB_MAX_MIN_SEMIRING_INT64, // GxB_MAX_MIN_INT64
GrB_MAX_MIN_SEMIRING_UINT8, // GxB_MAX_MIN_UINT8
GrB_MAX_MIN_SEMIRING_UINT16, // GxB_MAX_MIN_UINT16
GrB_MAX_MIN_SEMIRING_UINT32, // GxB_MAX_MIN_UINT32
GrB_MAX_MIN_SEMIRING_UINT64, // GxB_MAX_MIN_UINT64
GrB_MAX_MIN_SEMIRING_FP32, // GxB_MAX_MIN_FP32
GrB_MAX_MIN_SEMIRING_FP64, // GxB_MAX_MIN_FP64
//--------------------------------------------------------------------------
// 4 boolean semirings:
//--------------------------------------------------------------------------
GrB_LOR_LAND_SEMIRING_BOOL, // GxB_LOR_LAND_BOOL
GrB_LAND_LOR_SEMIRING_BOOL, // GxB_LAND_LOR_BOOL
GrB_LXOR_LAND_SEMIRING_BOOL, // GxB_LXOR_LAND_BOOL
GrB_LXNOR_LOR_SEMIRING_BOOL ; // GxB_EQ_LOR_BOOL (note EQ == LXNOR)
//==============================================================================
// GrB_*_resize: change the size of a matrix or vector
//==============================================================================
// If the dimensions decrease, entries that fall outside the resized matrix or
// vector are deleted.
GB_PUBLIC
GrB_Info GrB_Matrix_resize // change the size of a matrix
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GrB_Vector_resize // change the size of a vector
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_*_resize are identical to the GrB_*_resize methods above
GB_PUBLIC
GrB_Info GxB_Matrix_resize // change the size of a matrix (historical)
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GxB_Vector_resize // change the size of a vector (historical)
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_resize is a generic function for resizing a matrix or vector:
// GrB_Vector_resize (u,nrows_new)
// GrB_Matrix_resize (A,nrows_new,ncols_new)
// The C11 _Generic selection below dispatches on the static type of the first
// argument, so it is only defined when the compiler supports C11 or later.
#if GxB_STDC_VERSION >= 201112L
#define GxB_resize(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GrB_Vector_resize , \
GrB_Matrix : GrB_Matrix_resize \
) \
(arg1, __VA_ARGS__)
#endif
//------------------------------------------------------------------------------
// GxB_Matrix_Pending: Checks to see if matrix has pending operations
//------------------------------------------------------------------------------
GrB_Info GxB_Matrix_Pending
(
GrB_Matrix A, // matrix to query
bool *pending // are there any pending operations
) ;
//==============================================================================
// GxB_fprint and GxB_print: print the contents of a GraphBLAS object
//==============================================================================
// GxB_fprint (object, GxB_Print_Level pr, FILE *f) prints the contents of any
// of the 9 GraphBLAS objects to the file f, and also does an extensive test on
// the object to determine if it is valid. It returns one of the following
// error conditions:
//
// GrB_SUCCESS object is valid
// GrB_UNINITIALIZED_OBJECT object is not initialized
// GrB_INVALID_OBJECT object is not valid
// GrB_NULL_POINTER object is a NULL pointer
// GrB_INVALID_VALUE fprintf returned an I/O error; see the ANSI C
// errno or GrB_error( ) for details.
//
// GxB_fprint does not modify the status of any object. If a matrix or vector
// has not been completed, the pending computations are guaranteed to *not* be
// performed by GxB_fprint. The reason is simple. It is possible for a bug in
// the user application (such as accessing memory outside the bounds of an
// array) to mangle the internal content of a GraphBLAS object, and GxB_fprint
// can be a helpful tool to track down this bug. If GxB_fprint attempted to
// complete any computations prior to printing or checking the contents of the
// matrix or vector, then further errors could occur, including a segfault.
//
// The type-specific functions include an additional argument, the name string.
// The name is printed at the beginning of the display (assuming pr is not
// GxB_SILENT) so that the object can be more easily identified in the output.
// For the type-generic methods GxB_fprint and GxB_print, the name string is
// the variable name of the object itself.
//
// If f is NULL, stdout is used; this is not an error condition. If pr is
// outside the bounds 0 to 3, negative values are treated as GxB_SILENT, and
// values > 3 are treated as GxB_COMPLETE. If name is NULL, it is treated as
// the empty string.
//
// GxB_print (object, GxB_Print_Level pr) is the same as GxB_fprint, except
// that it prints the contents with printf instead of fprintf to a file f.
//
// The exact content and format of what is printed is implementation-dependent,
// and will change from version to version of SuiteSparse:GraphBLAS. Do not
// attempt to rely on the exact content or format by trying to parse the
// resulting output via another program. The intent of these functions is to
// produce a report of the object for visual inspection.
// Print levels accepted by GxB_fprint / GxB_print and the type-specific
// GxB_*_fprint methods.  Higher levels print progressively more detail.
// NOTE(review): values 4 and 5 exceed the "0 to 3" bounds described in the
// comment above ("values > 3 are treated as GxB_COMPLETE") — confirm how the
// *_VERBOSE levels interact with that clamping rule.
typedef enum
{
GxB_SILENT = 0, // nothing is printed, just check the object
GxB_SUMMARY = 1, // print a terse summary
GxB_SHORT = 2, // short description, about 30 entries of a matrix
GxB_COMPLETE = 3, // print the entire contents of the object
GxB_SHORT_VERBOSE = 4, // GxB_SHORT but with "%.15g" for doubles
GxB_COMPLETE_VERBOSE = 5 // GxB_COMPLETE but with "%.15g" for doubles
}
GxB_Print_Level ;
// Type-specific print/check methods.  Each GxB_*_fprint below takes the
// object, a name string for the printout, a GxB_Print_Level, and the output
// FILE* (NULL means stdout, per the notes above), and returns a GrB_Info
// status indicating whether the object is valid.
GB_PUBLIC
GrB_Info GxB_Type_fprint // print and check a GrB_Type
(
GrB_Type type, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_fprint // print and check a GrB_UnaryOp
(
GrB_UnaryOp unaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_fprint // print and check a GrB_BinaryOp
(
GrB_BinaryOp binaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_fprint // print and check a GrB_IndexUnaryOp
(
GrB_IndexUnaryOp op, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_fprint // print and check a GxB_SelectOp
(
GxB_SelectOp selectop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_fprint // print and check a GrB_Monoid
(
GrB_Monoid monoid, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_fprint // print and check a GrB_Semiring
(
GrB_Semiring semiring, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_fprint // print and check a GrB_Descriptor
(
GrB_Descriptor descriptor, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_fprint // print and check a GrB_Matrix
(
GrB_Matrix A, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Vector_fprint // print and check a GrB_Vector
(
GrB_Vector v, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_fprint // print and check a GrB_Scalar
(
GrB_Scalar s, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
#if GxB_STDC_VERSION >= 201112L
// GxB_fprint (object, pr, f): type-generic wrapper. C11 _Generic selects the
// GxB_*_fprint method that matches the static type of 'object' (const and
// non-const variants both map to the same method). GB_STR stringizes the
// 'object' argument so the variable's own name is passed as the name string.
#define GxB_fprint(object,pr,f) \
_Generic \
( \
(object), \
const GrB_Type : GxB_Type_fprint , \
GrB_Type : GxB_Type_fprint , \
const GrB_UnaryOp : GxB_UnaryOp_fprint , \
GrB_UnaryOp : GxB_UnaryOp_fprint , \
const GrB_BinaryOp : GxB_BinaryOp_fprint , \
GrB_BinaryOp : GxB_BinaryOp_fprint , \
const GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
const GxB_SelectOp : GxB_SelectOp_fprint , \
GxB_SelectOp : GxB_SelectOp_fprint , \
const GrB_Monoid : GxB_Monoid_fprint , \
GrB_Monoid : GxB_Monoid_fprint , \
const GrB_Semiring : GxB_Semiring_fprint , \
GrB_Semiring : GxB_Semiring_fprint , \
const GrB_Scalar : GxB_Scalar_fprint , \
GrB_Scalar : GxB_Scalar_fprint , \
const GrB_Vector : GxB_Vector_fprint , \
GrB_Vector : GxB_Vector_fprint , \
const GrB_Matrix : GxB_Matrix_fprint , \
GrB_Matrix : GxB_Matrix_fprint , \
const GrB_Descriptor : GxB_Descriptor_fprint , \
GrB_Descriptor : GxB_Descriptor_fprint \
) \
(object, GB_STR(object), pr, f)
// GxB_print: same dispatch as GxB_fprint, with f == NULL (print to stdout).
#define GxB_print(object,pr) GxB_fprint(object,pr,NULL)
#endif
//==============================================================================
// Matrix and vector import/export/pack/unpack
//==============================================================================
// The import/export/pack/unpack functions allow the user application to create
// a GrB_Matrix or GrB_Vector object, and to extract its contents, faster and
// with less memory overhead than the GrB_*_build and GrB_*_extractTuples
// functions.
// The semantics of import/export/pack/unpack are the same as the "move
// constructor" in C++. On import, the user provides a set of arrays that have
// been previously allocated via the ANSI C malloc function. The arrays define
// the content of the matrix or vector. Unlike GrB_*_build, the GraphBLAS
// library then takes ownership of the user's input arrays and may either (a)
// incorporate them into its internal data structure for the new GrB_Matrix or
// GrB_Vector, potentially creating the GrB_Matrix or GrB_Vector in constant
// time with no memory copying performed, or (b) if the library does not
// support the import format directly, then it may convert the input to its
// internal format, and then free the user's input arrays. GraphBLAS may also
// choose to use a mix of the two strategies. In either case, the input arrays
// are no longer "owned" by the user application. If A is a GrB_Matrix created
// by an import/pack, the user input arrays are freed no later than GrB_free
// (&A), and may be freed earlier, at the discretion of the GraphBLAS library.
// The data structure of the GrB_Matrix and GrB_Vector remain opaque.
// The export/unpack of a GrB_Matrix or GrB_Vector is symmetric with the import
// operation. The export is destructive, where the GrB_Matrix or GrB_Vector no
// longer exists when the export completes. The GrB_Matrix or GrB_Vector
// exists after an unpack operation, just with no entries. In both export and
// unpack, the user is returned several arrays that contain the matrix or
// vector in the requested format. Ownership of these arrays is given to the
// user application, which is then responsible for freeing them via the ANSI C
// free function. If the output format is supported by the GraphBLAS library,
// then these arrays may be returned to the user application in O(1) time and
// with no memory copying performed. Otherwise, the GraphBLAS library will
// create the output arrays for the user (via the ANSI C malloc function), fill
// them with the GrB_Matrix or GrB_Vector data, and then return the newly
// allocated arrays to the user.
// Eight different formats are provided for import/export. For each format,
// the Ax array has a C-type <type> corresponding to one of the 13 built-in
// types in GraphBLAS (bool, int*_t, uint*_t, float, double, float complex, or
// double complex), or a user-defined type.
// On import/pack, the required user arrays Ah, Ap, Ab, Ai, Aj, and/or Ax must
// be non-NULL pointers to memory space allocated by the ANSI C malloc (or
// calloc, or realloc), unless nzmax is zero (in which case the Ab, Ai, Aj, Ax,
// vb, vi, and vx arrays may all be NULL). For the import, A (or GrB_Vector v)
// is undefined on input, just like the output of GrB_*_new. If the import
// is successful, the GrB_Matrix A or GrB_Vector v is created, and the pointers
// to the user input arrays have been set to NULL. These user arrays have
// either been incorporated directly into the GrB_Matrix A or GrB_Vector v, in
// which case the user input arrays will eventually be freed by GrB_free (&A),
// or their contents have been copied and the arrays freed. This decision is
// made by the GraphBLAS library itself, and the user application has no
// control over this decision.
// If any of the arrays Ab, Aj, Ai, Ax, vb, vi, or vx have zero size (with
// nzmax of zero), they are allowed to be NULL pointers on input.
// A matrix or vector may be "iso", where all entries present in the pattern
// have the same value. In this case, the boolean iso flag is true, and the
// corresponding numerical array (Ax for matrices, vx for vectors, below) need
// be only large enough to hold a single value.
// No error checking is performed on the content of the user input arrays. If
// the user input arrays do not conform to the precise specifications above,
// results are undefined. No typecasting of the values of the matrix or vector
// entries is performed on import or export.
// SuiteSparse:GraphBLAS supports all eight formats natively (CSR, CSC,
// HyperCSR, and HyperCSC, BitmapR, BitmapC, FullR, FullC). For vectors, only
// CSC, BitmapC, and FullC formats are used. On import, all eight formats
// take O(1) time and memory to import. On export, if the GrB_Matrix or
// GrB_Vector is already in this particular format, then the export takes O(1)
// time and no memory copying is performed.
// If the import is not successful, the GxB_Matrix_import_* functions return A
// as NULL, GxB_Vector_import returns v as NULL, and the user input arrays are
// neither modified nor freed. They are still owned by the user application.
// If the input data is untrusted, use the following descriptor setting for
// GxB_Matrix_import* and GxB_Matrix_pack*. The import/pack will be slower,
// but secure. GrB_Matrix_import uses the slow, secure method, since it has
// no descriptor input.
//
// GxB_set (desc, GxB_IMPORT, GxB_SECURE_IMPORT) ;
// As of v5.2.0, GxB_*import* and GxB_*export* are declared historical. Use
// GxB_*pack* and GxB_*unpack* instead. The GxB import/export will be kept
// but only documented here, not in the User Guide.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSR: pack a CSR matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSR // historical: use GxB_Matrix_pack_CSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSR // pack a CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// CSR: an nrows-by-ncols matrix with nvals entries in CSR format consists
// of 3 arrays, where nvals = Ap [nrows]:
//
// GrB_Index Ap [nrows+1], Aj [nvals] ; <type> Ax [nvals] ;
//
// The column indices of entries in the ith row of the matrix are held
// in Aj [Ap [i] ... Ap[i+1]], and the corresponding values are held
// in the same positions in Ax. Column indices must be in the range 0
// to ncols-1. If jumbled is false, the column indices must appear in
// sorted order within each row. No duplicate column indices may
// appear in any row. Ap [0] must equal zero, and Ap [nrows] must
// equal nvals. The Ap array must be of size nrows+1 (or larger), and
// the Aj and Ax arrays must have size at least nvals. If nvals is
// zero, then the Aj and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSC: pack a CSC matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSC // historical: use GxB_Matrix_pack_CSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSC // pack a CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// CSC: an nrows-by-ncols matrix with nvals entries in CSC format consists
// of 3 arrays, where nvals = Ap [ncols]:
//
// GrB_Index Ap [ncols+1], Ai [nvals] ; <type> Ax [nvals] ;
//
// The row indices of entries in the jth column of the matrix are held
// in Ai [Ap [j] ... Ap[j+1]], and the corresponding values are held
// in the same positions in Ax. Row indices must be in the range 0 to
// nrows-1. If jumbled is false, the row indices must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. Ap [0] must equal zero, and Ap [ncols] must
// equal nvals. The Ap array must be of size ncols+1 (or larger), and
// the Ai and Ax arrays must have size at least nvals. If nvals is
// zero, then the Ai and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSR: pack a hypersparse CSR matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSR // historical: use GxB_Matrix_pack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSR // pack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSR: an nrows-by-ncols matrix with nvals entries and nvec
// rows that may have entries in HyperCSR format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
// GrB_Index Ah [nvec], Ap [nvec+1], Aj [nvals] ;
// <type> Ax [nvals] ;
//
// The Aj and Ax arrays are the same for a matrix in CSR or HyperCSR
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the row indices of rows that appear in
// the matrix. It
// must appear in sorted order, and no duplicates may appear. If i =
// Ah [k] is the kth row, then the column indices of the ith
// row appear in Aj [Ap [k] ... Ap [k+1]], and the corresponding
// values appear in the same locations in Ax. Column indices must be
// in the range 0 to ncols-1, and must appear in sorted order within
// each row. No duplicate column indices may appear in any row. nvec
// may be zero, to denote an array with no entries. The Ah array must
// be of size at least nvec, Ap must be of size at least nvec+1, and
// Aj and Ax must be at least of size nvals. If nvals is zero, then
// the Aj and Ax arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSC: pack a hypersparse CSC matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSC // historical: use GxB_Matrix_pack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSC // pack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSC: an nrows-by-ncols matrix with nvals entries and nvec
// columns that may have entries in HyperCSC format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
//
// GrB_Index Ah [nvec], Ap [nvec+1], Ai [nvals] ;
// <type> Ax [nvals] ;
//
// The Ai and Ax arrays are the same for a matrix in CSC or HyperCSC
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the column indices of non-empty columns.
// It must appear in sorted order, and no duplicates may appear. If j
// = Ah [k] is the kth non-empty column, then the row indices of the
// jth column appear in Ai [Ap [k] ... Ap [k+1]], and the
// corresponding values appear in the same locations in Ax. Row
// indices must be in the range 0 to nrows-1, and must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. nvec may be zero, to denote an array with no
// entries. The Ah array must be of size at least nvec, Ap must be of
// size at least nvec+1, and Ai and Ax must be at least of size nvals.
// If nvals is zero, then the Ai and Ax arrays need not be present and
// can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapR: pack a bitmap matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapR // historical: use GxB_Matrix_pack_BitmapR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapR // pack a bitmap matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapR: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i*ncols+j] = 1 if the
// A(i,j) entry is present with value Ax [i*ncols+j], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapC: pack a bitmap matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapC // historical: use GxB_Matrix_pack_BitmapC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapC // pack a bitmap matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapC: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i+j*nrows] = 1 if the
// A(i,j) entry is present with value Ax [i+j*nrows], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullR: pack a full matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullR // historical: use GxB_Matrix_pack_FullR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullR // pack a full matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullR: an nrows-by-ncols full matrix held in row-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i*ncols+j]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullC: pack a full matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullC // historical: use GxB_Matrix_pack_FullC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullC // pack a full matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullC: an nrows-by-ncols full matrix held in column-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i+j*nrows]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Vector_pack_CSC: import/pack a vector in CSC format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_CSC // historical: use GxB_Vector_pack_CSC
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_CSC // pack a vector in CSC format
(
GrB_Vector v, // vector to create (type and length unchanged)
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in CSC format, except that no vp array is required. If nvals is
// zero, then the vi and vx arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Bitmap: pack a vector in bitmap format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Bitmap // historical: GxB_Vector_pack_Bitmap
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Bitmap // pack a bitmap vector
(
GrB_Vector v, // vector to create (type and length unchanged)
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in BitmapC format.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Full: pack a vector in full format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Full // historical: use GxB_Vector_pack_Full
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Full // pack a full vector
(
GrB_Vector v, // vector to create (type and length unchanged)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in FullC format.
//------------------------------------------------------------------------------
// GxB* export/unpack
//------------------------------------------------------------------------------
// The GxB_*_export/unpack functions are symmetric with the GxB_*_import/pack
// functions. The export/unpack functions force completion of any pending
// operations, prior to the export, except if the only pending operation is to
// unjumble the matrix.
//
// If there are no entries in the matrix or vector, then the index arrays (Ai,
// Aj, or vi) and value arrays (Ax or vx) are returned as NULL. This is not an
// error condition.
//
// A GrB_Matrix may be exported/unpacked in any one of four different formats.
// On successful export, the input GrB_Matrix A is freed, and the output arrays
// Ah, Ap, Ai, Aj, and/or Ax are returned to the user application as arrays
// allocated by the ANSI C malloc function. The four formats are the same as
// the import formats for GxB_Matrix_import/pack.
//
// If jumbled is NULL on input, this indicates to GxB_*export/unpack* that the
// exported/unpacked matrix cannot be returned in a jumbled format. In this
// case, if the matrix is jumbled, it is sorted before exporting it to the
// caller.
//
// If iso is NULL on input, this indicates to the export/unpack methods that
// the exported/unpacked matrix cannot be returned in an iso format, with an Ax
// array with just one entry. In this case, if the matrix is iso, it is
// expanded before exporting/unpacking it to the caller.
//
// For the export/unpack*Full* methods, all entries in the matrix or vector
// must be present. That is, GrB_*_nvals must report nvals equal to
// nrows*ncols for a matrix (or the vector length for a vector). If this
// condition does not hold, the matrix/vector is not exported/unpacked,
// and GrB_INVALID_VALUE is returned.
//
// If the export/unpack is not successful, the export/unpack functions do not
// modify matrix or vector and the user arrays are returned as NULL.
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSR // historical: use GxB_Matrix_unpack_CSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSR // unpack a CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSC // historical: use GxB_Matrix_unpack_CSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSC // unpack a CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSR // historical: use GxB_Matrix_unpack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSR // unpack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSC // historical: use GxB_Matrix_unpack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSC // unpack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapR // historical: use GxB_Matrix_unpack_BitmapR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapR // unpack a bitmap matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapC // historical: use GxB_Matrix_unpack_BitmapC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapC // unpack a bitmap matrix, by col
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullR // historical: use GxB_Matrix_unpack_FullR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullR // unpack a full matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullC // historical: use GxB_Matrix_unpack_FullC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullC // unpack a full matrix, by column
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_CSC // historical: use GxB_Vector_unpack_CSC
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_CSC // unpack a CSC vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Bitmap // historical: use GxB_Vector_unpack_Bitmap
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Bitmap // unpack a bitmap vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Full // historical: use GxB_Vector_unpack_Full
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Full // unpack a full vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
//==============================================================================
// GrB import/export
//==============================================================================
// The GrB_Matrix_import method copies from user-provided arrays into an
// opaque GrB_Matrix and GrB_Matrix_export copies data out, from an opaque
// GrB_Matrix into user-provided arrays. Unlike the GxB pack/unpack methods,
// memory is not handed off between the user application and GraphBLAS.
// These methods are much slower than the GxB pack/unpack methods, since they
// require a copy of the data to be made. GrB_Matrix_import also must assume
// its input data cannot be trusted, and so it does extensive checks. The GxB
// pack takes O(1) time in all cases (unless it is told the input data is
// untrusted, via the descriptor). GxB unpack takes O(1) time unless the
// matrix is exported in a different format than it currently has.
// No typecasting of the values is done on import or export.
// The GrB C API specification supports 3 formats:
// GrB_Format: the format used by GrB_Matrix_import and GrB_Matrix_export,
// as defined by the GraphBLAS C API specification.
typedef enum
{
GrB_CSR_FORMAT = 0, // CSR format (equiv to GxB_SPARSE with GxB_BY_ROW)
GrB_CSC_FORMAT = 1, // CSC format (equiv to GxB_SPARSE with GxB_BY_COL)
GrB_COO_FORMAT = 2 // triplet format (like input to GrB*build)
}
GrB_Format ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_BOOL // import a GrB_BOOL matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_BOOL)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const bool *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT8 // import a GrB_INT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT16 // import a GrB_INT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT32 // import a GrB_INT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT64 // import a GrB_INT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT8 // import a GrB_UINT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT16 // import a GrB_UINT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT32 // import a GrB_UINT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT64 // import a GrB_UINT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP32 // import a GrB_FP32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const float *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP64 // import a GrB_FP64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const double *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC32 // import a GxB_FC32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC64 // import a GxB_FC64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UDT // import a matrix with a user-defined type
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const void *Ax, // values (must match the type parameter)
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
#if GxB_STDC_VERSION >= 201112L
// Type-generic GrB_Matrix_import: C11 _Generic dispatches on the type of the
// Ax values array to the matching GrB_Matrix_import_* / GxB_Matrix_import_*
// method above.
#define GrB_Matrix_import(A,type,nrows,ncols,Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt)\
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_import) \
) \
(A, type, nrows, ncols, Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt)
#endif
// For GrB_Matrix_export_T: on input, Ap_len, Ai_len, and Ax_len are
// the size of the 3 arrays Ap, Ai, and Ax, in terms of the # of entries.
// On output, these 3 values are modified to be the # of entries copied
// into those 3 arrays.
GB_PUBLIC
GrB_Info GrB_Matrix_export_BOOL // export a GrB_BOOL matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
bool *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_BOOL)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT8 // export a GrB_INT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT16 // export a GrB_INT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT32 // export a GrB_INT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT64 // export a GrB_INT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT8 // export a GrB_UINT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT16 // export a GrB_UINT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT32 // export a GrB_UINT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT64 // export a GrB_UINT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP32 // export a GrB_FP32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
float *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP64 // export a GrB_FP64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
double *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP64)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC32 // export a GxB_FC32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GxB_FC32)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC64 // export a GxB_FC64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GxB_FC64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UDT // export a matrix with a user-defined type
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
void *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
#if GxB_STDC_VERSION >= 201112L
// Type-generic GrB_Matrix_export: C11 _Generic dispatches on the type of the
// Ax values array to the matching GrB_Matrix_export_* / GxB_Matrix_export_*
// method above.
#define GrB_Matrix_export(Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt,A) \
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_export) \
) \
(Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt, A)
#endif
GB_PUBLIC
GrB_Info GrB_Matrix_exportSize // determine sizes of user arrays for export
(
GrB_Index *Ap_len, // # of entries required for Ap (not # of bytes)
GrB_Index *Ai_len, // # of entries required for Ai (not # of bytes)
GrB_Index *Ax_len, // # of entries required for Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_exportHint // suggest the best export format
(
GrB_Format *format, // export format
GrB_Matrix A // matrix to export
) ;
//==============================================================================
// serialize/deserialize
//==============================================================================
// GxB_Matrix_serialize copies the contents of a GrB_Matrix into a single array
// of bytes (the "blob"). The contents of the blob are implementation
// dependent. The blob can be saved to a file, or sent across a communication
// channel, and then a GrB_Matrix can be reconstructed from the blob, even on
// another process or another machine, using the same version of
// SuiteSparse:GraphBLAS (v5.2.0 or later). The goal is that future versions
// of SuiteSparse:GraphBLAS should be able to read in the blob as well, and
// reconstruct a matrix. The matrix can be reconstructed from the blob using
// GxB_Matrix_deserialize. The blob is compressed, by default, and
// uncompressed by GxB_Matrix_deserialize.
// GrB_Matrix_serialize/deserialize are slightly different from their GxB*
// counterparts. The blob is allocated by GxB_Matrix_serialize, and must be
// freed by GxB_serialize_free (which calls the ANSI C11 free if GrB_init was
// used). By contrast, the GrB* methods require the user application to pass
// in a preallocated blob to GrB_Matrix_serialize, whose size can be given by
// GrB_Matrix_serializeSize (as a loose upper bound).
// The GrB* and GxB* methods can be mixed. GrB_Matrix_serialize and
// GxB_Matrix_serialize construct the same blob (assuming they are given the
// same # of threads to do the work). Both GrB_Matrix_deserialize and
// GxB_Matrix_deserialize can deserialize a blob coming from either
// GrB_Matrix_serialize or GxB_Matrix_serialize.
// Deserialization of untrusted data is a common security problem; see
// https://cwe.mitre.org/data/definitions/502.html. The deserialization methods
// below do a few basic checks so that no out-of-bounds access occurs during
// deserialization, but the output matrix itself may still be corrupted. If
// the data is untrusted, use this to check the matrix:
// GxB_Matrix_fprint (A, "A deserialized", GrB_SILENT, NULL)
// Example usage:
/*
//--------------------------------------------------------------------------
// using GxB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type:
void *blob ;
GrB_Index blob_size ;
GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ;
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A
GxB_Matrix_deserialize (&B, MyQtype, blob, blob_size, NULL) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
char type_name [GxB_MAX_NAME_LEN] ;
GxB_deserialize_type_name (type_name, blob, blob_size) ;
printf ("blob type is: %s\n", type_name) ;
GrB_Type user_type = NULL ;
if (strncmp (type_name, "myquaternion", GxB_MAX_NAME_LEN) == 0)
user_type = MyQtype ;
GxB_Matrix_deserialize (&A, user_type, blob, blob_size, NULL) ;
free (blob) ; // note, freed by the user, not GraphBLAS
//--------------------------------------------------------------------------
// using GrB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type, MyQType:
void *blob = NULL ;
GrB_Index blob_size = 0 ;
GrB_Matrix A, B = NULL ;
// construct a matrix A, then serialized it:
GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound
blob = malloc (blob_size) ;
GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size
blob = realloc (blob, blob_size) ; // user can shrink the blob
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A:
GrB_Matrix_deserialize (&B, MyQtype, blob, blob_size) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
// the user must know the type of A is MyQType
GrB_Matrix_deserialize (&A, MyQtype, blob, blob_size) ;
free (blob) ;
*/
// Three methods are currently implemented: no compression, LZ4, and LZ4HC
#define GxB_COMPRESSION_NONE -1 // no compression
#define GxB_COMPRESSION_DEFAULT 0 // LZ4
#define GxB_COMPRESSION_LZ4 1000 // LZ4
#define GxB_COMPRESSION_LZ4HC 2000 // LZ4HC, with default level 9
// possible future methods that could be added:
// #define GxB_COMPRESSION_ZLIB 3000 // ZLIB, with default level 6
// #define GxB_COMPRESSION_LZO 4000 // LZO, with default level 2
// #define GxB_COMPRESSION_BZIP2 5000 // BZIP2, with default level 9
// #define GxB_COMPRESSION_LZSS 6000 // LZSS
// using the Intel IPP versions, if available (not yet supported);
#define GxB_COMPRESSION_INTEL 1000000
// Most of the above methods have a level parameter that controls the tradeoff
// between run time and the amount of compression obtained. Higher levels
// result in a more compact result, at the cost of higher run time:
// LZ4 no level setting
// LZ4HC 1: fast, 9: default, 9: max
// these methods are not yet supported but may be added in the future:
// ZLIB 1: fast, 6: default, 9: max
// LZO 1: fast (X1ST), 2: default (XST)
// BZIP2 1: fast, 9: default, 9: max
// LZSS no level setting
// For all methods, a level of zero results in the default level setting.
// These settings can be added, so to use LZ4HC at level 5, use method =
// GxB_COMPRESSION_LZ4HC + 5.
// If the Intel IPPS compression methods are available, they can be selected
// by adding GxB_COMPRESSION_INTEL. For example, to use the Intel IPPS
// implementation of LZ4HC at level 9, use method = GxB_COMPRESSION_INTEL +
// GxB_COMPRESSION_LZ4HC + 9 = 1,002,009. If the Intel methods are requested
// but not available, this setting is ignored and the non-Intel methods are
// used instead.
// If the level setting is out of range, the default is used for that method.
// If the method is negative, no compression is performed. If the method is
// positive but unrecognized, the default is used (GxB_COMPRESSION_LZ4, with no
// level setting, and the non-Intel version).
// If a method is not implemented, LZ4 is used instead, and the level setting
// is ignored.
GB_PUBLIC
GrB_Info GxB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Matrix A, // matrix to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void *blob, // the blob, already allocated in input
// input/output:
GrB_Index *blob_size_handle, // size of the blob on input. On output,
// the # of bytes used in the blob.
// input:
GrB_Matrix A // matrix to serialize
) ;
GB_PUBLIC
GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Vector u, // vector to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob
(
// output:
GrB_Index *blob_size_handle, // upper bound on the required size of the
// blob on output.
// input:
GrB_Matrix A // matrix to serialize
) ;
// The GrB* and GxB* deserialize methods are nearly identical. The GxB*
// deserialize methods simply add the descriptor, which allows for optional
// control of the # of threads used to deserialize the blob.
GB_PUBLIC
GrB_Info GxB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
GB_PUBLIC
GrB_Info GxB_Vector_deserialize // deserialize blob into a GrB_Vector
(
// output:
GrB_Vector *w, // output vector created from the blob
// input:
GrB_Type type, // type of the vector w. Required if the blob holds a
// vector of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of w.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
// GxB_deserialize_type_name extracts the type_name of the GrB_Type of the
// GrB_Matrix or GrB_Vector held in a serialized blob. On input, type_name
// must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it
// must not point into the blob itself). On output, type_name will contain a
// null-terminated string with the corresponding C type name. If the blob
// holds a matrix of a built-in type, the name is returned as "bool" for
// GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc.
// See GxB_Type_name to convert this name into a GrB_Type.
GB_PUBLIC
GrB_Info GxB_deserialize_type_name // return the type name of a blob
(
// output:
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
// input, not modified:
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
//==============================================================================
// GxB_Vector_sort and GxB_Matrix_sort: sort a matrix or vector
//==============================================================================
GB_PUBLIC
GrB_Info GxB_Vector_sort
(
// output:
GrB_Vector w, // vector of sorted values
GrB_Vector p, // vector containing the permutation
// input
GrB_BinaryOp op, // comparator op
GrB_Vector u, // vector to sort
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_sort
(
// output:
GrB_Matrix C, // matrix of sorted values
GrB_Matrix P, // matrix containing the permutations
// input
GrB_BinaryOp op, // comparator op
GrB_Matrix A, // matrix to sort
const GrB_Descriptor desc
) ;
// GxB_sort: polymorphic sort.  Uses C11 _Generic to dispatch on the type of
// the first argument: a GrB_Vector selects GxB_Vector_sort, and a GrB_Matrix
// selects GxB_Matrix_sort.  The remaining arguments are forwarded unchanged.
#define GxB_sort(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GxB_Vector_sort , \
GrB_Matrix : GxB_Matrix_sort \
) \
(arg1, __VA_ARGS__)
//==============================================================================
// GxB_Iterator: an object that iterates over the entries of a matrix or vector
//==============================================================================
/* Example usage:
single thread iteration of a whole matrix, one row at a time (in the
outer loop), and one entry at a time within the row (in the inner loop):
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(0,:)
info = GxB_rowIterator_seekRow (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
parallel iteration using 4 threads (work may be imbalanced however):
GrB_Index nrows ;
GrB_wait (A, GrB_MATERIALIZE) ; // this is essential
GrB_Matrix_nrows (&nrows, A) ;
#pragma omp parallel for num_threads(4)
for (int tid = 0 ; tid < 4 ; tid++)
{
// thread tid operates on A(row1:row2-1,:)
GrB_Index row1 = tid * (nrows / 4) ;
GrB_Index row2 = (tid == 3) ? nrows : ((tid+1) * (nrows / 4)) ;
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(row1,:)
info = GxB_rowIterator_seekRow (iterator, row1) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
if (i >= row2) break ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
}
In the parallel example above, a more balanced work distribution can be
obtained by first computing the row degree via GrB_mxv (see LAGraph), and
then compute the cumulative sum (ideally in parallel). Next, partition the
cumulative sum into one part per thread via binary search, and divide the
rows into parts accordingly.
*/
//------------------------------------------------------------------------------
// GxB_Iterator: definition and new/free methods
//------------------------------------------------------------------------------
// The contents of an iterator must not be directly accessed by the user
// application. Only the functions and macros provided here may access
// "iterator->..." contents. The iterator is defined here only so that macros
// can be used to speed up the use of the iterator methods. User applications
// must not use "iterator->..." directly.
// GB_Iterator_opaque: the contents of a GxB_Iterator.  The layout is exposed
// here only so the GxB_*Iterator_* methods below can be implemented as macros
// for speed; user applications must never access these members directly.
struct GB_Iterator_opaque
{
// these components change as the iterator moves (via seek or next):
int64_t pstart ; // the start of the current vector
int64_t pend ; // the end of the current vector
int64_t p ; // position of the current entry
int64_t k ; // the current vector
// only changes when the iterator is created:
size_t header_size ; // size of this iterator object
// these components only change when the iterator is attached:
int64_t pmax ; // avlen*avdim for bitmap; nvals(A) otherwise
int64_t avlen ; // length of each vector in the matrix
int64_t avdim ; // number of vectors in the matrix dimension
int64_t anvec ; // # of vectors present in the matrix
const int64_t *GB_restrict Ap ; // pointers for sparse and hypersparse
const int64_t *GB_restrict Ah ; // vector names for hypersparse
const int8_t *GB_restrict Ab ; // bitmap
const int64_t *GB_restrict Ai ; // indices for sparse and hypersparse
const void *GB_restrict Ax ; // values for all 4 data structures
size_t type_size ; // size of the type of A
int A_sparsity ; // sparse, hyper, bitmap, or full
bool iso ; // true if A is iso-valued, false otherwise
bool by_col ; // true if A is held by column, false if by row
} ;
// GxB_Iterator: a handle to the opaque iterator object above
typedef struct GB_Iterator_opaque *GxB_Iterator ;
// GxB_Iterator_new: create a new iterator, not attached to any matrix/vector
GB_PUBLIC GrB_Info GxB_Iterator_new (GxB_Iterator *iterator) ;
// GxB_Iterator_free: free an iterator
GB_PUBLIC GrB_Info GxB_Iterator_free (GxB_Iterator *iterator) ;
//==============================================================================
// GB_Iterator_*: implements user-callable GxB_*Iterator_* methods
//==============================================================================
// GB_* methods are not user-callable. These methods appear here so that the
// iterator methods can be done via macros.
//------------------------------------------------------------------------------
// GB_Iterator_attach: attach a row/col/entry iterator to a matrix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_attach
(
GxB_Iterator iterator, // iterator to attach to the matrix A
GrB_Matrix A, // matrix to attach
GxB_Format_Value format, // by row, by col, or by entry (GxB_NO_FORMAT)
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: seek a row/col iterator to a particular vector
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_rc_seek
(
GxB_Iterator iterator,
GrB_Index j,
bool jth_vector
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_bitmap_next: move a row/col iterator to next entry in bitmap
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_rc_bitmap_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_knext: move a row/col iterator to the next vector
//------------------------------------------------------------------------------
// GB_Iterator_rc_knext(iterator): a comma-expression macro that advances a
// row/col iterator to the next vector.  The whole macro evaluates to a
// GrB_Info result: GxB_EXHAUSTED if no vectors remain, GrB_NO_VALUE if the
// next vector has no entries, or GrB_SUCCESS if it has at least one entry.
// Note that "iterator" is evaluated many times, so it must be a simple
// expression with no side effects.
#define GB_Iterator_rc_knext(iterator) \
( \
/* move to the next vector, and check if iterator is exhausted */ \
(++(iterator->k) >= iterator->anvec) ? \
( \
/* iterator is at the end of the matrix */ \
iterator->pstart = 0, \
iterator->pend = 0, \
iterator->p = 0, \
iterator->k = iterator->anvec, \
GxB_EXHAUSTED \
) \
: \
( \
/* find first entry in vector, and pstart/pend for this vector */ \
(iterator->A_sparsity <= GxB_SPARSE) ? \
( \
/* matrix is sparse or hypersparse */ \
iterator->pstart = iterator->Ap [iterator->k], \
iterator->pend = iterator->Ap [iterator->k+1], \
iterator->p = iterator->pstart, \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
: \
( \
/* matrix is bitmap or full */ \
iterator->pstart += iterator->avlen, \
iterator->pend += iterator->avlen, \
iterator->p = iterator->pstart, \
(iterator->A_sparsity <= GxB_BITMAP) ? \
( \
/* matrix is bitmap */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
/* matrix is full */ \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_inext: move a row/col iterator the next entry in the vector
//------------------------------------------------------------------------------
// GB_Iterator_rc_inext(iterator): a comma-expression macro that advances a
// row/col iterator to the next entry in the current vector.  It evaluates to
// GrB_NO_VALUE when the vector is exhausted, and otherwise GrB_SUCCESS (for
// the bitmap case, GB_Iterator_rc_bitmap_next skips over absent entries and
// determines the result).  "iterator" must be a side-effect-free expression.
#define GB_Iterator_rc_inext(iterator) \
( \
/* move to the next entry in the vector */ \
(++(iterator->p) >= iterator->pend) ? \
( \
/* no more entries in the current vector */ \
GrB_NO_VALUE \
) \
: \
( \
(iterator->A_sparsity == GxB_BITMAP) ? \
( \
/* the matrix is in bitmap form */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
GrB_SUCCESS \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_getj: get index of current vector for row/col iterator
//------------------------------------------------------------------------------
// GB_Iterator_rc_getj(iterator): evaluates to the index of the current
// vector (the row index for a row iterator, or the column index for a col
// iterator), or avdim if the iterator is exhausted.  For hypersparse
// matrices the vector's name is looked up in Ah; otherwise j == k.
#define GB_Iterator_rc_getj(iterator) \
( \
(iterator->k >= iterator->anvec) ? \
( \
/* iterator is past the end of the matrix */ \
iterator->avdim \
) \
: \
( \
(iterator->A_sparsity == GxB_HYPERSPARSE) ? \
( \
/* return the name of kth vector: j = Ah [k] if it appears */ \
iterator->Ah [iterator->k] \
) \
: \
( \
/* return the kth vector: j = k */ \
iterator->k \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_geti: return index of current entry for row/col iterator
//------------------------------------------------------------------------------
// GB_Iterator_rc_geti(iterator): evaluates to the index of the current entry
// within its vector.  Ai holds explicit indices for sparse and hypersparse
// matrices; when Ai is NULL (bitmap or full), the index is computed directly
// from the position as p - pstart.
#define GB_Iterator_rc_geti(iterator) \
( \
(iterator->Ai != NULL) ? \
( \
iterator->Ai [iterator->p] \
) \
: \
( \
(iterator->p - iterator->pstart) \
) \
)
//==============================================================================
// GxB_rowIterator_*: iterate over the rows of a matrix
//==============================================================================
#undef GxB_rowIterator_attach
#undef GxB_rowIterator_kount
#undef GxB_rowIterator_seekRow
#undef GxB_rowIterator_kseek
#undef GxB_rowIterator_nextRow
#undef GxB_rowIterator_nextCol
#undef GxB_rowIterator_getRowIndex
#undef GxB_rowIterator_getColIndex
//------------------------------------------------------------------------------
// GxB_rowIterator_attach: attach a row iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_rowIterator_attach attaches a row iterator to a matrix. If the iterator
// is already attached to a matrix, it is detached and then attached to the
// given matrix A.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or A are NULL.
// GrB_INVALID_OBJECT: if the matrix A is invalid.
// GrB_NOT_IMPLEMENTED: if the matrix A cannot be iterated by row.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the row iterator is attached to the matrix, but not to any
// specific row. Use GxB_rowIterator_*seek* to move the iterator to a row.
GB_PUBLIC
GrB_Info GxB_rowIterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
// macro form of GxB_rowIterator_attach (shadows the prototype above)
#define GxB_rowIterator_attach(iterator, A, desc) \
( \
GB_Iterator_attach (iterator, A, GxB_BY_ROW, desc) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kount: upper bound on the # of nonempty rows of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kount returns an upper bound on the # of non-empty rows of a
// matrix. A GraphBLAS library may always return this as simply nrows(A), but
// in some libraries, it may be a value between the # of rows with at least one
// entry, and nrows(A), inclusive. Any value in this range is a valid return
// value from this function.
// For SuiteSparse:GraphBLAS: If A is m-by-n, and sparse, bitmap, or full, then
// kount == m. If A is hypersparse, kount is the # of vectors held in the data
// structure for the matrix, some of which may be empty, and kount <= m.
GB_PUBLIC
GrB_Index GxB_rowIterator_kount (GxB_Iterator iterator) ;
// macro form of GxB_rowIterator_kount: returns iterator->anvec directly
#define GxB_rowIterator_kount(iterator) \
( \
(iterator)->anvec \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_seekRow: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_seekRow moves a row iterator to the first entry of A(row,:).
// If A(row,:) has no entries, the iterator may move to the first entry of next
// nonempty row i for some i > row. The row index can be determined by
// GxB_rowIterator_getRowIndex.
// For SuiteSparse:GraphBLAS: If the matrix is hypersparse, and the row
// does not appear in the hyperlist, then the iterator is moved to the first
// row after the given row that does appear in the hyperlist.
// The method is always successful; the following conditions are returned:
// GxB_EXHAUSTED: if the row index is >= nrows(A); the row iterator is
// exhausted, but is still attached to the matrix.
// GrB_NO_VALUE: if the row index is valid but A(row,:) has no entries; the
// row iterator is positioned at A(row,:).
// GrB_SUCCESS: if the row index is valid and A(row,:) has at least one
// entry. The row iterator is positioned at A(row,:).
// GxB_rowIterator_get* can be used to return the indices of
// the first entry in A(row,:), and GxB_Iterator_get* can
// return its value.
GB_PUBLIC
GrB_Info GxB_rowIterator_seekRow (GxB_Iterator iterator, GrB_Index row) ;
// macro form of GxB_rowIterator_seekRow (jth_vector = false: seek by row index)
#define GxB_rowIterator_seekRow(iterator, row) \
( \
GB_Iterator_rc_seek (iterator, row, false) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kseek: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kseek is identical to GxB_rowIterator_seekRow, except for
// how the row index is specified. The row is the kth non-empty row of A.
// More precisely, k is in the range 0 to kount-1, where kount is the value
// returned by GxB_rowIterator_kount.
GB_PUBLIC
GrB_Info GxB_rowIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
// macro form of GxB_rowIterator_kseek (jth_vector = true: seek kth nonempty row)
#define GxB_rowIterator_kseek(iterator, k) \
( \
GB_Iterator_rc_seek (iterator, k, true) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextRow: move a row iterator to the next row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// If the row iterator is currently at A(row,:), it is moved to A(row+1,:),
// or to the first non-empty row after A(row,:), at the discretion of this
// method. That is, empty rows may be skipped.
// The method is always successful, and the return conditions are identical to
// the return conditions of GxB_rowIterator_seekRow.
GB_PUBLIC
GrB_Info GxB_rowIterator_nextRow (GxB_Iterator iterator) ;
// macro form of GxB_rowIterator_nextRow: advance to the next vector (row)
#define GxB_rowIterator_nextRow(iterator) \
( \
GB_Iterator_rc_knext (iterator) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextCol: move a row iterator to the next entry in A(row,:)
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// The method is always successful, and returns the following conditions:
// GrB_NO_VALUE: If the iterator is already exhausted, or if there is no
// entry in the current A(row,:).
// GrB_SUCCESS: If the row iterator has been moved to the next entry in
// A(row,:).
GB_PUBLIC
GrB_Info GxB_rowIterator_nextCol (GxB_Iterator iterator) ;
// macro form of GxB_rowIterator_nextCol: advance to the next entry in the row
#define GxB_rowIterator_nextCol(iterator) \
( \
GB_Iterator_rc_inext ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getRowIndex: get current row index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator; results are undefined if this condition is not met.
// The method returns nrows(A) if the iterator is exhausted, or the current
// row index otherwise. There need not be any entry in the current row.
// Zero is returned if the iterator is attached to the matrix but
// GxB_rowIterator_*seek* has not been called, but this does not mean the
// iterator is positioned at row zero.
GB_PUBLIC
GrB_Index GxB_rowIterator_getRowIndex (GxB_Iterator iterator) ;
// macro form of GxB_rowIterator_getRowIndex: index of the current vector (row)
#define GxB_rowIterator_getRowIndex(iterator) \
( \
GB_Iterator_rc_getj ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getColIndex: get current column index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator, and in addition, the row iterator must be positioned at a
// valid entry present in the matrix. That is, the last call to
// GxB_rowIterator_*seek* or GxB_rowIterator_*next*, must have returned
// GrB_SUCCESS. Results are undefined if this condition is not met.
GB_PUBLIC
GrB_Index GxB_rowIterator_getColIndex (GxB_Iterator iterator) ;
// macro form of GxB_rowIterator_getColIndex: index of the current entry
#define GxB_rowIterator_getColIndex(iterator) \
( \
GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_colIterator_*: iterate over columns of a matrix
//==============================================================================
// The column iterator is analogous to the row iterator.
#undef GxB_colIterator_attach
#undef GxB_colIterator_kount
#undef GxB_colIterator_seekCol
#undef GxB_colIterator_kseek
#undef GxB_colIterator_nextCol
#undef GxB_colIterator_nextRow
#undef GxB_colIterator_getColIndex
#undef GxB_colIterator_getRowIndex
// GxB_colIterator_attach: attach a column iterator to a matrix
GB_PUBLIC
GrB_Info GxB_colIterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
// macro form of GxB_colIterator_attach (shadows the prototype above)
#define GxB_colIterator_attach(iterator, A, desc) \
( \
GB_Iterator_attach (iterator, A, GxB_BY_COL, desc) \
)
// GxB_colIterator_kount: return # of nonempty columns of the matrix
GB_PUBLIC
GrB_Index GxB_colIterator_kount (GxB_Iterator iterator) ;
// macro form of GxB_colIterator_kount: returns iterator->anvec directly
#define GxB_colIterator_kount(iterator) \
( \
(iterator)->anvec \
)
// GxB_colIterator_seekCol: move a column iterator to A(:,col)
GB_PUBLIC
GrB_Info GxB_colIterator_seekCol (GxB_Iterator iterator, GrB_Index col) ;
// macro form of GxB_colIterator_seekCol (jth_vector = false: seek by col index)
#define GxB_colIterator_seekCol(iterator, col) \
( \
GB_Iterator_rc_seek (iterator, col, false) \
)
// GxB_colIterator_kseek: move a column iterator to kth non-empty column of A
GB_PUBLIC
GrB_Info GxB_colIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
// macro form of GxB_colIterator_kseek (jth_vector = true: seek kth nonempty col)
#define GxB_colIterator_kseek(iterator, k) \
( \
GB_Iterator_rc_seek (iterator, k, true) \
)
// GxB_colIterator_nextCol: move a column iterator to first entry of next column
GB_PUBLIC
GrB_Info GxB_colIterator_nextCol (GxB_Iterator iterator) ;
// macro form of GxB_colIterator_nextCol: advance to the next vector (column)
#define GxB_colIterator_nextCol(iterator) \
( \
GB_Iterator_rc_knext ((iterator)) \
)
// GxB_colIterator_nextRow: move a column iterator to next entry in column
GB_PUBLIC
GrB_Info GxB_colIterator_nextRow (GxB_Iterator iterator) ;
// macro form of GxB_colIterator_nextRow: advance to the next entry in the column
#define GxB_colIterator_nextRow(iterator) \
( \
GB_Iterator_rc_inext ((iterator)) \
)
// GxB_colIterator_getColIndex: return the column index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getColIndex (GxB_Iterator iterator) ;
// macro form of GxB_colIterator_getColIndex: index of the current vector (col)
#define GxB_colIterator_getColIndex(iterator) \
( \
GB_Iterator_rc_getj ((iterator)) \
)
// GxB_colIterator_getRowIndex: return the row index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getRowIndex (GxB_Iterator iterator) ;
// macro form of GxB_colIterator_getRowIndex: index of the current entry
#define GxB_colIterator_getRowIndex(iterator) \
( \
GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_Matrix_Iterator_*: iterate over the entries of a matrix
//==============================================================================
// Example usage:
// single thread iteration of a whole matrix, one entry at a time
/*
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_Matrix_Iterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Matrix_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry A(i,j)
GrB_Index i, j ;
GxB_Matrix_Iterator_getIndex (iterator, &i, &j) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A
info = GxB_Matrix_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_attach: attach an entry iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Matrix_Iterator_attach attaches an entry iterator to a matrix. If the
// iterator is already attached to a matrix, it is detached and then attached
// to the given matrix A.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or A are NULL.
// GrB_INVALID_OBJECT: if the matrix A is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the entry iterator is attached to the matrix, but not to any
// specific entry. Use GxB_Matrix_Iterator_*seek* to move the iterator to a
// particular entry.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getpmax: return the range of the iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a matrix are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(A). For sparse, hypersparse, and full matrices, pmax is equal
// to nvals(A). For an m-by-n bitmap matrix, pmax=m*n, or pmax=0 if the
// matrix has no entries.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getpmax (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_seek: seek to a specific entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the matrix,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Matrix_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_next: move to the next entry of a matrix
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getp: get the current position of a matrix iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getp (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getIndex: get the row and column index of a matrix entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
void GxB_Matrix_Iterator_getIndex
(
GxB_Iterator iterator,
GrB_Index *row,
GrB_Index *col
) ;
//==============================================================================
// GxB_Vector_Iterator_*: iterate over the entries of a vector
//==============================================================================
/* Example usage:
single thread iteration of a whole vector, one entry at at time
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the vector v, known to be type GrB_FP64
GrB_Info info = GxB_Vector_Iterator_attach (iterator, v, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Vector_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry v(i)
GrB_Index i = GxB_Vector_Iterator_getIndex (iterator) ;
double vi = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in v
info = GxB_Vector_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
#undef GxB_Vector_Iterator_getpmax
#undef GxB_Vector_Iterator_seek
#undef GxB_Vector_Iterator_next
#undef GxB_Vector_Iterator_getp
#undef GxB_Vector_Iterator_getIndex
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_attach: attach an iterator to a vector
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Vector_Iterator_attach attaches an iterator to a vector. If the
// iterator is already attached to a vector or matrix, it is detached and then
// attached to the given vector v.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or v are NULL.
// GrB_INVALID_OBJECT: if the vector v is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the iterator is attached to the vector, but not to any
// specific entry. Use GxB_Vector_Iterator_seek to move the iterator to a
// particular entry.
GB_PUBLIC GrB_Info GxB_Vector_Iterator_attach
(
GxB_Iterator iterator,
GrB_Vector v,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getpmax: return the range of the vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a vector are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(v). For sparse and full vectors, pmax is equal to nvals(v).
// For a size-m bitmap vector, pmax=m, or pmax=0 if the vector has no entries.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getpmax (GxB_Iterator iterator) ;
// macro form shadows the GB_PUBLIC function declared above; simply returns
// the pmax field cached at attach time (see comments above for its meaning)
#define GxB_Vector_Iterator_getpmax(iterator) \
( \
    (iterator->pmax) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_seek: seek to a specific entry in the vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the vector,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Vector_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GB_Vector_Iterator_bitmap_seek (GxB_Iterator iterator, GrB_Index p) ;
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
// internal implementation of seek: exhaustion check first, then position
// update.  A bitmap vector needs GB_Vector_Iterator_bitmap_seek to advance
// p to the next entry actually present, since not every position of a
// bitmap holds an entry; all other formats are dense in p.
#define GB_Vector_Iterator_seek(iterator, q) \
( \
    (q >= iterator->pmax) ? \
    ( \
        /* the iterator is exhausted */ \
        iterator->p = iterator->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        /* seek to an arbitrary position in the vector */ \
        iterator->p = q, \
        (iterator->A_sparsity == GxB_BITMAP) ? \
        ( \
            GB_Vector_Iterator_bitmap_seek (iterator, q) \
        ) \
        : \
        ( \
            GrB_SUCCESS \
        ) \
    ) \
)

// public entry point: forwards to the internal macro above
#define GxB_Vector_Iterator_seek(iterator, p) \
( \
    GB_Vector_Iterator_seek (iterator, p) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_next: move to the next entry of a vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next. Results are undefined if these conditions are not
// met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_next (GxB_Iterator iterator) ;
// internal implementation of next: pre-increments p and reports exhaustion
// once p reaches pmax.  NOTE(review): unlike the seek macro, this does not
// scan past unset bitmap positions -- presumably bitmap handling happens
// elsewhere; confirm against the GraphBLAS user guide.
#define GB_Vector_Iterator_next(iterator) \
( \
    /* move to the next entry */ \
    (++(iterator->p) >= iterator->pmax) ? \
    ( \
        /* the iterator is exhausted */ \
        iterator->p = iterator->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        GrB_SUCCESS \
    ) \
)

// public entry point: forwards to the internal macro above
#define GxB_Vector_Iterator_next(iterator) \
( \
    GB_Vector_Iterator_next (iterator) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getp: get the current position of a vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getp (GxB_Iterator iterator) ;
// returns the current position p, in range 0 to pmax
#define GxB_Vector_Iterator_getp(iterator) \
( \
    (iterator->p) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getIndex: get the index of a vector entry
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getIndex (GxB_Iterator iterator) ;
// sparse vectors record entry indices in Ai; bitmap and full vectors have
// Ai == NULL, where the index of the entry is the position p itself
#define GxB_Vector_Iterator_getIndex(iterator) \
( \
    ((iterator->Ai != NULL) ? iterator->Ai [iterator->p] : iterator->p) \
)
//==============================================================================
// GxB_Iterator_get_TYPE: get value of the current entry for any iterator
//==============================================================================
// On input, the prior call to GxB_*Iterator_*seek*, or GxB_*Iterator_*next*
// must have returned GrB_SUCCESS, indicating that the iterator is at a valid
// current entry for either a matrix or vector.
// Returns the value of the current entry at the position determined by the
// iterator. No typecasting is permitted; the method name must match the
// type of the matrix or vector.
#undef GxB_Iterator_get_BOOL
#undef GxB_Iterator_get_INT8
#undef GxB_Iterator_get_INT16
#undef GxB_Iterator_get_INT32
#undef GxB_Iterator_get_INT64
#undef GxB_Iterator_get_UINT8
#undef GxB_Iterator_get_UINT16
#undef GxB_Iterator_get_UINT32
#undef GxB_Iterator_get_UINT64
#undef GxB_Iterator_get_FP32
#undef GxB_Iterator_get_FP64
#undef GxB_Iterator_get_FC32
#undef GxB_Iterator_get_FC64
#undef GxB_Iterator_get_UDT
GB_PUBLIC bool GxB_Iterator_get_BOOL (GxB_Iterator iterator) ;
GB_PUBLIC int8_t GxB_Iterator_get_INT8 (GxB_Iterator iterator) ;
GB_PUBLIC int16_t GxB_Iterator_get_INT16 (GxB_Iterator iterator) ;
GB_PUBLIC int32_t GxB_Iterator_get_INT32 (GxB_Iterator iterator) ;
GB_PUBLIC int64_t GxB_Iterator_get_INT64 (GxB_Iterator iterator) ;
GB_PUBLIC uint8_t GxB_Iterator_get_UINT8 (GxB_Iterator iterator) ;
GB_PUBLIC uint16_t GxB_Iterator_get_UINT16 (GxB_Iterator iterator) ;
GB_PUBLIC uint32_t GxB_Iterator_get_UINT32 (GxB_Iterator iterator) ;
GB_PUBLIC uint64_t GxB_Iterator_get_UINT64 (GxB_Iterator iterator) ;
GB_PUBLIC float GxB_Iterator_get_FP32 (GxB_Iterator iterator) ;
GB_PUBLIC double GxB_Iterator_get_FP64 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC32_t GxB_Iterator_get_FC32 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC64_t GxB_Iterator_get_FC64 (GxB_Iterator iterator) ;
GB_PUBLIC void GxB_Iterator_get_UDT (GxB_Iterator iterator,
void *value) ;
// value access: iso-valued objects store a single shared value at Ax [0];
// otherwise the value of the current entry lives at position p of Ax
#define GB_Iterator_get(iterator, type) \
( \
    (((type *) (iterator)->Ax) [(iterator)->iso ? 0 : (iterator)->p]) \
)

// one accessor per built-in type; no typecasting is performed, so the
// accessor used must match the type of the matrix or vector
#define GxB_Iterator_get_BOOL(iterator) GB_Iterator_get (iterator, bool)
#define GxB_Iterator_get_INT8(iterator) GB_Iterator_get (iterator, int8_t)
#define GxB_Iterator_get_INT16(iterator) GB_Iterator_get (iterator, int16_t)
#define GxB_Iterator_get_INT32(iterator) GB_Iterator_get (iterator, int32_t)
#define GxB_Iterator_get_INT64(iterator) GB_Iterator_get (iterator, int64_t)
#define GxB_Iterator_get_UINT8(iterator) GB_Iterator_get (iterator, uint8_t)
#define GxB_Iterator_get_UINT16(iterator) GB_Iterator_get (iterator, uint16_t)
#define GxB_Iterator_get_UINT32(iterator) GB_Iterator_get (iterator, uint32_t)
#define GxB_Iterator_get_UINT64(iterator) GB_Iterator_get (iterator, uint64_t)
#define GxB_Iterator_get_FP32(iterator) GB_Iterator_get (iterator, float)
#define GxB_Iterator_get_FP64(iterator) GB_Iterator_get (iterator, double)
#define GxB_Iterator_get_FC32(iterator) GB_Iterator_get (iterator, GxB_FC32_t)
#define GxB_Iterator_get_FC64(iterator) GB_Iterator_get (iterator, GxB_FC64_t)

// user-defined types: memcpy type_size bytes of the current entry into the
// caller-supplied *value
#define GxB_Iterator_get_UDT(iterator, value) \
( \
    (void) memcpy ((void *) value, (iterator)->Ax + \
        ((iterator)->iso ? 0 : ((iterator)->type_size * (iterator)->p)), \
        (iterator)->type_size) \
)
#endif
|
BatchNormalization.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/BatchNormalization.c"
#else
// Forward pass of batch normalization over dimension 1 (the feature/channel
// dimension): out = ((in - mean) * invstd) * w + b, per feature plane.
// train == true : batch statistics are computed and cached in save_mean /
//   save_std (save_std stores the *inverse* std, reused by the backward
//   pass), and running_mean / running_var are blended with factor momentum.
// train == false: running_mean / running_var are used directly.
// NOTE: local names `in`, `out` (and hence `in_data`, `out_data`) are bound
// by the TH_TENSOR_APPLY* macros -- do not rename.
void THNN_(BatchNormalization_updateOutput)(
  THNNState *state, THTensor *input, THTensor *output,
  THTensor *weight, THTensor *bias,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double momentum, double eps)
{
  THTensor_(resizeAs)(output, input);
  int64_t nInput = THTensor_(size)(input, 1);   // number of feature planes
  int64_t f;
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;  // elements per plane

  if (train) {
    THTensor_(resize1d)(save_mean, nInput);
    THTensor_(resize1d)(save_std, nInput);
  }

  // one feature plane per iteration; planes are independent
  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *out = THTensor_(newSelect)(output, 1, f);

    real mean, invstd;

    if (train) {
      // compute mean per input
      accreal sum = 0;
      TH_TENSOR_APPLY(real, in, sum += *in_data;);

      // NOTE(review): sum is cast to `real` *before* the division, so the
      // mean is computed at real precision -- confirm this is intended
      mean = (real) sum / n;
      THTensor_(set1d)(save_mean, f, (real) mean);

      // compute variance per input (biased: divided by n for invstd below)
      sum = 0;
      TH_TENSOR_APPLY(real, in,
        sum += (*in_data - mean) * (*in_data - mean););

      if (sum == 0 && eps == 0.0) {
        invstd = 0;  // constant plane with eps == 0: avoid division by zero
      } else {
        invstd = (real) (1 / sqrt(sum/n + eps));
      }
      // save_std holds the inverse std (consumed by the backward pass)
      THTensor_(set1d)(save_std, f, (real) invstd);

      // update running averages
      if (running_mean) {
        THTensor_(set1d)(running_mean, f,
          (real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f)));
      }
      if (running_var) {
        accreal unbiased_var = sum / (n - 1);  // Bessel-corrected variance
        THTensor_(set1d)(running_var, f,
          (real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f)));
      }
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // compute output: normalize, then apply optional affine transform
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real b = bias ? THTensor_(get1d)(bias, f) : 0;

    TH_TENSOR_APPLY2(real, in, real, out,
      *out_data = (real) (((*in_data - mean) * invstd) * w + b););

    THTensor_(free)(out);
    THTensor_(free)(in);
  }
}
// Backward pass of batch normalization.  Accumulates scale * d(gradWeight)
// and scale * d(gradBias) into the (optional) gradWeight / gradBias tensors,
// and optionally computes gradInput.  In training mode the saved batch
// statistics from the forward pass are used (save_std holds 1/std); in
// evaluation mode the running statistics are used.
// NOTE: local names `in`, `gradOut`, `gradIn` are bound by the
// TH_TENSOR_APPLY* macros -- do not rename.
void THNN_(BatchNormalization_backward)(
  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput,
  THTensor *gradWeight, THTensor *gradBias, THTensor *weight,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double scale, double eps)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  int64_t nInput = THTensor_(size)(input, 1);   // number of feature planes
  int64_t f;
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;  // elements per plane

  if (gradInput) {
    THTensor_(resizeAs)(gradInput, input);
  }

  // feature planes are independent
  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f);
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real mean, invstd;
    if (train) {
      // statistics cached by the forward pass (save_std == 1/std)
      mean = THTensor_(get1d)(save_mean, f);
      invstd = THTensor_(get1d)(save_std, f);
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // sum over all gradOutput in feature plane
    accreal sum = 0;
    TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;);

    // dot product of the centered input Q(X) and gradOutput
    accreal dotp = 0;
    TH_TENSOR_APPLY2(real, in, real, gradOut,
      dotp += (*in_data - mean) * (*gradOut_data););

    if (gradInput) {
      THTensor *gradIn = THTensor_(newSelect)(gradInput, 1, f);

      if (train) {
        // when in training mode
        // Q(X) = X - E[x] ; i.e. input centered to zero mean
        // Y = Q(X) / σ    ; i.e. BN output before weight and bias
        // dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / σ * w

        // projection of gradOutput on to output scaled by std
        real k = (real) dotp * invstd * invstd / n;
        TH_TENSOR_APPLY2(real, gradIn, real, in,
          *gradIn_data = (*in_data - mean) * k;);

        accreal gradMean = sum / n;
        // second pass combines the mean-gradient and projection terms
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;);

      } else {
        // when in evaluation mode
        // Q(X) = X - running_mean ; i.e. input centered to zero mean
        // Y = Q(X) / running_std  ; i.e. BN output before weight and bias
        // dL/dX = w / running_std
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = *gradOut_data * invstd * w;);
      }

      THTensor_(free)(gradIn);
    }

    if (gradWeight) {
      real val = THTensor_(get1d)(gradWeight, f);
      THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd);
    }

    if (gradBias) {
      real val = THTensor_(get1d)(gradBias, f);
      THTensor_(set1d)(gradBias, f, val + scale * sum);
    }

    THTensor_(free)(gradOut);
    THTensor_(free)(in);
  }
}
#endif
|
bug_serial_taskgroup.c | // RUN: %libomp-compile-and-run
/*
GCC failed this test because __kmp_get_gtid() instead of __kmp_entry_gtid()
was called in xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void).
__kmp_entry_gtid() will initialize the runtime if not yet done which does not
happen with __kmp_get_gtid().
*/
// Regression test: #pragma omp taskgroup must work even when the OpenMP
// runtime has not yet been initialized (no prior parallel region).  The
// taskgroup being the *first* OpenMP construct is the whole point -- keep
// this body minimal.
int main()
{
  #pragma omp taskgroup
  { }
  return 0;
}
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include "core/common/optional.h"
#include <functional>
#include <memory>
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
} // namespace Eigen
namespace onnxruntime {
struct TensorOpCost {
double bytes_loaded;
double bytes_stored;
double compute_cycles;
};
template <typename Environment>
class ThreadPoolTempl;
namespace concurrency {
class ExtendedThreadPoolInterface;
class LoopCounter;
// Thread pool used by ONNX Runtime operators.  The implementation is
// PIMPL-wrapped (Eigen-based) so Eigen headers stay out of this header.
// The static Try* helpers transparently fall back to OpenMP pragmas when
// _OPENMP is defined, or to serial execution when the pool pointer is null.
class ThreadPool {
 public:
#ifdef _WIN32
  using NAME_CHAR_TYPE = wchar_t;
#else
  using NAME_CHAR_TYPE = char;
#endif
  // Constructs a pool for running with "degree_of_parallelism" threads with
  // specified "name". env->StartThread() is used to create individual threads
  // with the given ThreadOptions. If "low_latency_hint" is true the thread pool
  // implementation may use it as a hint that lower latency is preferred at the
  // cost of higher CPU usage, e.g. by letting one or more idle threads spin
  // wait. Conversely, if the threadpool is used to schedule high-latency
  // operations like I/O the hint should be set to false.
  //
  // REQUIRES: degree_of_parallelism > 0
  // The allocator parameter is only used for creating an Eigen::ThreadPoolDevice
  // to be used with Eigen Tensor classes.
  ThreadPool(Env* env,
             const ThreadOptions& thread_options,
             const NAME_CHAR_TYPE* name,
             int degree_of_parallelism,
             bool low_latency_hint);

  // Waits until all scheduled work has finished and then destroys the
  // set of threads.
  ~ThreadPool();

  // Schedules fn() for execution in the pool of threads. The function may run
  // synchronously if it cannot be enqueued. This will occur if the thread pool's
  // degree-of-parallelism is 1, but it may also occur for implementation-dependent
  // reasons such as if queues used for buffering work are full.
  void Schedule(std::function<void()> fn);

  // Returns the number of shards used by ParallelForFixedBlockSizeScheduling
  // with these parameters.
  int NumShardsUsedByFixedBlockSizeScheduling(std::ptrdiff_t total,
                                              std::ptrdiff_t block_size) const;

  // ParallelFor shards the "total" units of work assuming each unit of work
  // having roughly "cost_per_unit" cost, in cycles. Each unit of work is
  // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
  // and the total cost of each shard is roughly the same.
  //
  // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
  // if not CPU-bound) to complete a unit of work. Overestimating creates too
  // many shards and CPU time will be dominated by per-shard overhead, such as
  // Context creation. Underestimating may not fully make use of the specified
  // parallelism, and may also cause inefficiencies due to load balancing
  // issues and stragglers.
  void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);

  // Convenience wrapper: forwards a scalar cycle cost to the TensorOpCost
  // overload below (bytes loaded/stored are reported as zero).
  static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
    TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
  }

  void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t)>& fn);

  // Static entry point that tolerates tp == nullptr (runs fn over the whole
  // range on the calling thread) and, in OpenMP builds, partitions [0, total)
  // across OpenMP threads via PartitionWork, ignoring cost_per_unit.
  static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(cost_per_unit);
    std::ptrdiff_t num_threads = concurrency::ThreadPool::DegreeOfParallelism(tp);
    if (total < num_threads) {
      num_threads = total;  // never create empty partitions
    }
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < num_threads; i++) {
      auto work = PartitionWork(i, num_threads, total);
      fn(work.start, work.end);
    }
#else
    if (tp == nullptr) {
      // no pool: run the whole range serially on the calling thread
      fn(0, total);
      return;
    }
    tp->ParallelFor(total, cost_per_unit, fn);
#endif
  }

  // Return the degree of parallelism that code should assume when using the thread pool.
  // This API takes into account if OpenMP is enabled/disabled, and if the thread pool ptr is
  // nullptr. It decouples the degree of parallelism for use with the thread pool from
  // the implementation choice of whether this matches the number of threads created in
  // the pool.
  //
  // Currently, a loop with degree-of-parallelism N is supported by a pool of N-1 threads
  // working in combination with the thread initiating the loop.
  static int DegreeOfParallelism(const concurrency::ThreadPool* tp);

  // Directly schedule the 'total' tasks to the underlying threadpool, without
  // cutting them by halves
  void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn);

  // Static wrapper around SimpleParallelFor: OpenMP loop when built with
  // _OPENMP, serial loop when tp == nullptr.
  inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total,
                                          const std::function<void(std::ptrdiff_t)>& fn) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp != nullptr) {
      tp->SimpleParallelFor(total, fn);
    } else {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
    }
#endif
  }

  /**
   * Tries to call the given function in parallel, with calls split into
   * (num_batches) batches.
   * \param num_batches If it is zero, it will be replaced by the value of
   *        DegreeOfParallelism().
   * \param fn A std::function or STL-style functor called once per index in
   *        [0, total).
   * Pitfall: the caller should cap `num_batches` to a reasonable value based
   * on the cost of `fn` and the value of `total`.  For example, if fn is as
   * simple as: int sum=0; fn = [&](int i){sum += i;} and `total` is 100, then
   * num_batches should be just 1.
   **/
  template <typename F>
  inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
    ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp == nullptr) {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
      return;
    }
    if (total <= 0)
      return;

    if (total == 1) {
      fn(0);
      return;
    }

    if (num_batches <= 0) {
      num_batches = std::min<ptrdiff_t>(total, DegreeOfParallelism(tp));
    }

    if (num_batches <= 1) {
      // NOTE(review): loop index is int while total is ptrdiff_t -- fine for
      // practical sizes but truncates above INT_MAX; confirm intended.
      for (int i = 0; i < total; i++) {
        fn(i);
      }
      return;
    }

    tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
      auto work = PartitionWork(batch_index, num_batches, total);
      for (std::ptrdiff_t i = work.start; i < work.end; i++) {
        fn(i);
      }
    });
#endif
  }

  struct WorkInfo {
    std::ptrdiff_t start;  // first index of the batch (inclusive)
    std::ptrdiff_t end;    // one past the last index (exclusive)
  };

  /** Calculate the start and end offsets for a batch.
      The first (total_work % num_batches) batches receive one extra unit.
  @remarks Based on MlasPartitionWork
  */
  static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) {
    const std::ptrdiff_t work_per_batch = total_work / num_batches;
    const std::ptrdiff_t work_per_batch_extra = total_work % num_batches;

    WorkInfo info;
    if (batch_idx < work_per_batch_extra) {
      info.start = (work_per_batch + 1) * batch_idx;
      info.end = info.start + work_per_batch + 1;
    } else {
      info.start = work_per_batch * batch_idx + work_per_batch_extra;
      info.end = info.start + work_per_batch;
    }
    return info;
  }

  ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);

 private:
  friend class LoopCounter;

  // Returns the number of threads created in the pool. This may be different from the
  // value returned by DegreeOfParallelism to code using the pool.
  int NumThreads() const;

  // Returns current thread id between 0 and NumThreads() - 1, if called from a
  // thread in the pool. Returns -1 otherwise.
  int CurrentThreadId() const;

  // Run fn with up to n degree-of-parallelism enlisting the thread pool for
  // help. The degree-of-parallelism includes the caller, and so if n==1
  // then the function will run directly in the caller. The fork-join
  // synchronization is handled in the thread pool, and so any state captured
  // by fn() is safe from concurrent access once RunWithHelp returns.
  void RunInParallel(std::function<void()> fn, int n);

  // Divides the work represented by the range [0, total) into k shards.
  // Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
  // Each shard may be executed on a different thread in parallel, depending on
  // the number of threads available in the pool.
  // When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
  // Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size).
  // Requires 0 < block_size <= total.
  void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
                                           const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);

  // Return whether or not the calling thread should run a loop of
  // num_iterations divided in chunks of block_size in parallel. If not,
  // the caller should run the loop sequentially.
  bool ShouldParallelizeLoop(const std::ptrdiff_t num_iterations,
                             const std::ptrdiff_t block_size = 1) const;

  ThreadOptions thread_options_;

  // If a thread pool is created with degree_of_parallelism != 1 then an underlying
  // EigenThreadPool is used to create OS threads and handle work distribution to them.
  // If degree_of_parallelism == 1 then underlying_threadpool_ is left as nullptr
  // and parallel work is run directly by the caller.
  ExtendedThreadPoolInterface* underlying_threadpool_ = nullptr;

  // If used, underlying_threadpool_ is instantiated and owned by the ThreadPool.
  std::unique_ptr<ThreadPoolTempl<Env> > extended_eigen_threadpool_;
};
} // namespace concurrency
} // namespace onnxruntime
|
rbms.h | // Copyright 2022 Ivanov Arkady
#ifndef MODULES_TASK_2_IVANOV_A_RADIX_BATCHERS_MERGESORT_OMP_RBMS_H_
#define MODULES_TASK_2_IVANOV_A_RADIX_BATCHERS_MERGESORT_OMP_RBMS_H_
#include <stdint.h>
#include <omp.h>
#include <vector>
#include <random>
#include <algorithm>
#include <utility>
#include <iostream>
// <RadixSortPart>
// Builds per-byte histograms for an LSD radix sort of
// data[offset .. offset+count).  `counters` holds sizeof(T) groups of 256
// buckets: group i counts occurrences of byte i (memory byte order of T).
// After counting, each group is converted in place to an exclusive prefix
// sum (byte value -> first output slot).  A group whose bucket 0 already
// equals `count` (i.e. every element has byte 0 at that position) is left
// as raw counts on purpose: radixSort uses that as a sentinel to stop.
template <class T>
std::vector<int> createAndPrepareCounters(std::vector<T>* data, int offset, int count) {
    std::vector<int> counters(256 * sizeof(T));
    std::fill_n(counters.data(), counters.size(), 0);

    // walk the raw bytes of the selected range, one element at a time
    unsigned char* start = reinterpret_cast<unsigned char*>(
        data->data() + offset);
    unsigned char* stop = reinterpret_cast<unsigned char*>(
        data->data() + offset + count);
    while (start != stop) {
        for (int i = 0; i < static_cast<int>(sizeof(T)); i++) {
            counters[*start + 256 * i]++;
            start++;
        }
    }

    // exclusive prefix sums, skipping all-zero byte positions (sentinel)
    for (int i = 0; i < static_cast<int>(sizeof(T)); i++) {
        int sum = 0;
        if (counters[256 * i] == count)
            continue;
        for (int j = 0; j < 256; j++) {
            int tmp = counters[256 * i + j];
            counters[256 * i + j] = sum;
            sum += tmp;
        }
    }
    return counters;
}
// LSD (least-significant-byte-first) radix sort of
// data[offset .. offset+count).  Elements ping-pong between *data and the
// scratch vector `res` once per processed byte position.  A byte position
// whose counters group was left as raw counts (all remaining bytes zero --
// see createAndPrepareCounters) terminates the loop early.
// NOTE(review): correct ordering presumably assumes unsigned element types
// (signed values have the sign byte last) -- confirm against callers.
template<class T>
void radixSort(std::vector<T>* data, int offset, int count) {
    std::vector<int> counters =
        createAndPrepareCounters(data, offset, count);
    std::vector<T> res(count);

    int j;
    for (j = 0; j < static_cast<int>(sizeof(T)); j++) {
        int* countersPtr = counters.data() + 256 * j;
        // sentinel: group j still holds raw counts and bucket 0 == count,
        // so byte j (and all higher bytes) cannot change the order
        if (*countersPtr == count)
            break;

        T* dPtr, * rPtr;
        unsigned char* dataPtr;
        // even passes read *data and write res; odd passes swap buffers
        if (j % 2 == 0) {
            dPtr = data->data() + offset;
            dataPtr = reinterpret_cast<unsigned char*>(
                data->data() + offset);
            rPtr = res.data();
        } else {
            dPtr = res.data();
            dataPtr = reinterpret_cast<unsigned char*>(res.data());
            rPtr = data->data() + offset;
        }
        dataPtr += j;  // point at byte j of element 0

        // stable scatter by byte j using the exclusive prefix sums
        for (int i = 0; i < count; i++) {
            rPtr[*(countersPtr + *dataPtr)] = dPtr[i];
            *(countersPtr + *dataPtr) = *(countersPtr + *dataPtr) + 1;
            dataPtr += sizeof(T);
        }
    }

    // an odd number of completed passes leaves the result in res
    if (j % 2 == 1) {
        for (int i = 0; i < count; i++)
            data->operator[](i + offset) = res[i];
    }
}
// </RadixSortPart>
// <BatchersMergePart>
// Merge step of the Batcher-style exchange: given two sorted fragments of
// *data -- [offset1, offset1+size1) and [offset2, offset2+size2) -- write
// exactly ONE half of their merged sequence into *result:
//   isLeft == true : the size1 smallest elements (forward scan from fronts)
//   isLeft == false: the size2 largest elements (backward scan from backs)
// The "Impossible exception" branch guards the invariant that producing
// size1 (resp. size2) outputs can never exhaust both fragments.
template<class T>
void mergeFragments(std::vector<T>* data, std::vector<T>* result,
    int offset1, int size1, int offset2, int size2, bool isLeft) {
    if (isLeft) {
        // forward merge: keep the smallest size1 elements
        T* firstPtr = data->data() + offset1;
        int usedFirst = 0;
        T* secondPtr = data->data() + offset2;
        int usedSecond = 0;
        for (int i = 0; i < size1; i++) {
            if (usedFirst < size1 && usedSecond < size2) {
                if (*firstPtr < *secondPtr) {
                    result->operator[](i) = *firstPtr;
                    firstPtr++;
                    usedFirst++;
                } else {
                    result->operator[](i) = *secondPtr;
                    secondPtr++;
                    usedSecond++;
                }
            } else if (usedFirst < size1 && usedSecond >= size2) {
                // second fragment exhausted: drain the first
                result->operator[](i) = *firstPtr;
                firstPtr++;
                usedFirst++;
            } else if (usedFirst >= size1 && usedSecond < size2) {
                // first fragment exhausted: drain the second
                result->operator[](i) = *secondPtr;
                secondPtr++;
                usedSecond++;
            } else {
                throw "Impossible exception";
            }
        }
        return;
    }

    // if isLeft = false: backward merge keeping the largest size2 elements
    T* firstPtr = data->data() + offset1 + size1 - 1;
    int usedFirst = 0;
    T* secondPtr = data->data() + offset2 + size2 - 1;
    int usedSecond = 0;
    for (int i = size2 - 1; i >= 0; i--) {
        if (usedFirst < size1 && usedSecond < size2) {
            if (*firstPtr > *secondPtr) {
                result->operator[](i) = *firstPtr;
                firstPtr--;
                usedFirst++;
            } else {
                result->operator[](i) = *secondPtr;
                secondPtr--;
                usedSecond++;
            }
        } else if (usedFirst < size1 && usedSecond >= size2) {
            // second fragment exhausted: drain the first from the back
            result->operator[](i) = *firstPtr;
            firstPtr--;
            usedFirst++;
        } else if (usedFirst >= size1 && usedSecond < size2) {
            // first fragment exhausted: drain the second from the back
            result->operator[](i) = *secondPtr;
            secondPtr--;
            usedSecond++;
        } else {
            throw "Impossible exception";
        }
    }
}
int partner(int nodeIndex, int mergeStage, int mergeStageStep);
// </BatchersMergePart>
// <ServiceFunctions>
// Returns a uniformly distributed random integer in the inclusive range
// [from, to].  A single lazily-seeded Mersenne Twister is shared across
// calls (and across instantiations of the same T).
template<class T>
T getRandValue(T from, T to) {
    static std::random_device seed_source;
    static std::mt19937 engine(seed_source());
    std::uniform_int_distribution<T> range(from, to);
    return range(engine);
}
// Fills data[0 .. size) with uniform random integers drawn from the
// inclusive range [from, to], using a shared lazily-seeded generator.
template<class T>
void fillVecWithRandValues(T* data, int size, T from, T to) {
    static std::random_device seed_source;
    static std::mt19937 engine(seed_source());
    std::uniform_int_distribution<T> range(from, to);
    for (int idx = 0; idx < size; ++idx)
        data[idx] = range(engine);
}
// True iff data[0 .. size) is exactly the consecutive run
// startValue, startValue+1, ...  An empty range is trivially true.
template<class T>
bool isStrictAscending(T* data, int size, T startValue) {
    T expected = startValue;
    for (int idx = 0; idx < size; ++idx, ++expected) {
        if (data[idx] != expected)
            return false;
    }
    return true;
}
// True iff data[0 .. size) is exactly the consecutive run
// startValue, startValue-1, ...  An empty range is trivially true.
template<class T>
bool isStrictDescending(T* data, int size, T startValue) {
    T expected = startValue;
    for (int idx = 0; idx < size; ++idx, --expected) {
        if (data[idx] != expected)
            return false;
    }
    return true;
}
// Fills data[0 .. size) with the consecutive run startValue, startValue+1, ...
template<class T>
void fillStrictAscending(T* data, int size, T startValue) {
    T value = startValue;
    for (int idx = 0; idx < size; ++idx)
        data[idx] = value++;
}
// Fills data[0 .. size) with the consecutive run startValue, startValue-1, ...
template<class T>
void fillStrictDescending(T* data, int size, T startValue) {
    T value = startValue;
    for (int idx = 0; idx < size; ++idx)
        data[idx] = value--;
}
// Prints the elements of vec to std::cout, each followed by a single
// space, terminated by std::endl (which also flushes).
template<class T>
void printVector(const std::vector<T>& vec) {
    // size_t index fixes the signed/unsigned comparison of the previous
    // `int i < vec.size()` form (-Wsign-compare; wrong for sizes > INT_MAX)
    for (size_t i = 0; i < vec.size(); i++)
        std::cout << vec[i] << " ";
    std::cout << std::endl;
}
// True iff data[0..size) is sorted in non-decreasing order (ties allowed).
// Ranges of size 0 or 1 are trivially sorted.
template<class T>
bool isAscending(T* data, int size) {
    for (int idx = 1; idx < size; ++idx)
        if (data[idx - 1] > data[idx])
            return false;
    return true;
}
// True iff v1 and v2 have the same length and element-wise equal contents
// (compared with operator!=).
template<class T>
bool isVecSame(const std::vector<T>& v1, const std::vector<T>& v2) {
    if (v1.size() != v2.size())
        return false;
    size_t idx = 0;
    while (idx < v1.size()) {
        if (v1[idx] != v2[idx])
            return false;
        ++idx;
    }
    return true;
}
// </ServiceFunctions>
// <OMP REALISATION>
// ---------------------------<VER 1>---------------------------
// Merge wrapper for version 1: orients the two blocks by thread id and
// delegates to mergeFragments. The lower-id thread keeps the smaller half
// of the merged pair, the higher-id thread the larger half. A thread that
// is its own partner (idle network node) does nothing.
template<class T>
void _parallelMerge(std::vector<T>* data, std::vector<T>* fragment,
                    int blockSize, int selfID, int partnerID) {
    if (selfID == partnerID)
        return;
    const bool keepLower = (selfID < partnerID);
    const int loID = keepLower ? selfID : partnerID;
    const int hiID = keepLower ? partnerID : selfID;
    mergeFragments<T>(data, fragment,
        loID * blockSize, blockSize,
        hiID * blockSize, blockSize,
        keepLower);
}
// 2^degree = number of threads in use to sort.
// Version 1: each thread radix-sorts one contiguous block of `data`, then
// the blocks are combined through Batcher's odd-even merge network; every
// merge step writes into a per-thread `fragment` buffer which is copied
// back into `data` between barriers.
template<class T>
void radixBatchersMergeSort(std::vector<T>* data, int degree) {
    if (degree == 0) { // numThreads = 1
        radixSort<T>(data, 0, data->size());
        return;
    }
    int numThreads = 1 << degree;
    // fewer elements than threads: a single sequential sort is enough
    if (numThreads > static_cast<int>(data->size())) {
        radixSort<T>(data, 0, data->size());
        return;
    }
    size_t oldSize = data->size();
    bool isResized = false;
    if (oldSize % numThreads != 0) {
        // pad so every thread owns an equal-sized block; ~0 is intended to
        // sort last -- assumes T is an unsigned integer type (for signed T
        // this is -1 and would sort first) -- TODO confirm with radixSort
        data->resize(oldSize + (numThreads - oldSize % numThreads), ~0);
        isResized = true;
    }
    int size = static_cast<int>(data->size());
    int blockSize = size / numThreads;
    #pragma omp parallel num_threads(numThreads)
    {
        int selfID = omp_get_thread_num();
        // sort step
        radixSort<T>(data, selfID * blockSize, blockSize);
        #pragma omp barrier
        // merge step: Batcher's merge network realisation
        int partnerID;
        std::vector<T> fragment(blockSize);  // per-thread merge output
        for (int stage = 1; stage <= degree; stage++) {
            for (int step = 1; step <= stage; step++) {
                partnerID = partner(selfID, stage, step);
                // merge result lands in this thread's fragment
                _parallelMerge<T>(data, &fragment,
                    blockSize, selfID, partnerID);
                // barrier to prevent data corruption
                #pragma omp barrier
                // BUGFIX: only copy the fragment back when this thread
                // actually merged this step. An idle node (partnerID ==
                // selfID) has a stale fragment -- default-constructed on
                // the very first step -- and copying it would clobber the
                // block. Versions 2 and 3 guard this case the same way.
                if (selfID != partnerID) {
                    for (int i = 0; i < blockSize; i++)
                        data->operator[](selfID * blockSize + i) = fragment[i];
                }
                // barrier to prevent data corruption
                #pragma omp barrier
            }
        }
    }
    if (isResized)
        data->resize(oldSize);
}
// ---------------------------</VER 1>---------------------------
// ---------------------------<VER 2>---------------------------
// Merges two sorted blocks of `data` (the blocks owned by selfID and
// partnerID, each blockSize long) into `res`. The lower-id thread produces
// the blockSize smallest elements at its own offset; the higher-id thread
// produces the blockSize largest at its offset. On ties the right (second)
// block's element is emitted first in the forward pass and last in the
// backward pass, matching a stable two-way merge. A thread that is its own
// partner does nothing.
template<class T>
void _parallelMerge_v2(T* data, T* res,
                       int blockSize, int selfID, int partnerID) {
    if (selfID == partnerID)
        return;
    const bool isLeft = selfID < partnerID;
    const int leftID = isLeft ? selfID : partnerID;
    const int rightID = isLeft ? partnerID : selfID;
    const int offset1 = leftID * blockSize;
    const int offset2 = rightID * blockSize;
    if (isLeft) {
        // forward merge: emit the blockSize smallest elements
        const T* a = data + offset1;
        const T* b = data + offset2;
        int takenA = 0;
        int takenB = 0;
        for (int i = 0; i < blockSize; i++) {
            bool takeA;
            if (takenA < blockSize && takenB < blockSize)
                takeA = (*a < *b);
            else if (takenA < blockSize)
                takeA = true;
            else if (takenB < blockSize)
                takeA = false;
            else
                throw "Impossible exception";  // both inputs exhausted early
            if (takeA) {
                res[offset1 + i] = *a++;
                takenA++;
            } else {
                res[offset1 + i] = *b++;
                takenB++;
            }
        }
        return;
    }
    // backward merge: emit the blockSize largest elements
    const T* a = data + offset1 + blockSize - 1;
    const T* b = data + offset2 + blockSize - 1;
    int takenA = 0;
    int takenB = 0;
    for (int i = blockSize - 1; i >= 0; i--) {
        bool takeA;
        if (takenA < blockSize && takenB < blockSize)
            takeA = (*a > *b);
        else if (takenA < blockSize)
            takeA = true;
        else if (takenB < blockSize)
            takeA = false;
        else
            throw "Impossible exception";  // both inputs exhausted early
        if (takeA) {
            res[offset2 + i] = *a--;
            takenA++;
        } else {
            res[offset2 + i] = *b--;
            takenB++;
        }
    }
}
// 2^degree = number of threads in use to sort.
// Version 2: double-buffered merge. Instead of per-thread scratch
// fragments, the whole array ping-pongs between `data` and `res`; each
// Batcher step writes the merged halves into the inactive buffer, then all
// threads swap buffers together.
template<class T>
void radixBatchersMergeSort_v2(std::vector<T>* data, int degree) {
    if (degree == 0) { // numThreads = 1
        radixSort<T>(data, 0, data->size());
        return;
    }
    int numThreads = 1 << degree;
    // fewer elements than threads: a single sequential sort is enough
    if (numThreads > static_cast<int>(data->size())) {
        radixSort<T>(data, 0, data->size());
        return;
    }
    size_t oldSize = data->size();
    bool isResized = false;
    if (oldSize % numThreads != 0) {
        // pad so each thread owns an equal-sized block; ~0 is intended to
        // sort last -- assumes T is an unsigned integer type (for signed T
        // this is -1 and would sort first) -- TODO confirm
        data->resize(oldSize + (numThreads - oldSize % numThreads), ~0);
        isResized = true;
    }
    int size = data->size();
    int blockSize = size / numThreads;
    std::vector<T> res(size);  // second buffer of the ping-pong pair
    int stepsMade = 0;         // swap count; parity says where the result is
    T* from = data->data();
    T* to = res.data();
    T* tmp;
    #pragma omp parallel num_threads(numThreads)
    {
        int selfID = omp_get_thread_num();
        // sort step
        radixSort<T>(data, selfID * blockSize, blockSize);
        #pragma omp barrier
        // merge step
        // Batcher's merge network realisation
        int partnerID;
        for (int stage = 1; stage <= degree; stage++) {
            for (int step = 1; step <= stage; step++) {
                partnerID = partner(selfID, stage, step);
                _parallelMerge_v2(from, to, blockSize, selfID, partnerID);
                // idle network node: pass its block through to the other
                // buffer unchanged, so the swap below stays consistent
                if (selfID == partnerID) {
                    for (int i = 0; i < blockSize; i++)
                        to[selfID * blockSize + i] = from[selfID * blockSize + i];
                }
                #pragma omp barrier
                // one thread swaps the shared buffer pointers; `single`
                // carries an implicit barrier, so no thread proceeds with
                // stale from/to pointers
                #pragma omp single
                {
                    tmp = from;
                    from = to;
                    to = tmp;
                    stepsMade++;
                }
            }
        }
    }
    // odd number of swaps: the sorted result lives in `res`, copy it back
    if (stepsMade % 2 == 1)
        memmove(data->data(), res.data(), data->size() * sizeof(T));
    if (isResized)
        data->resize(oldSize);
}
// ---------------------------</VER 2>---------------------------
// ---------------------------<VER 3>---------------------------
// Merges this thread's sorted block (`data`) with its partner's sorted
// block (`partnerData`), both blockSize long, writing blockSize elements
// into `res`. The lower-id thread keeps the smallest half, the higher-id
// thread the largest half. Tie-breaking matches _parallelMerge_v2: equal
// elements from the partner block are emitted first in the forward pass and
// last in the backward pass. A thread that is its own partner does nothing.
template<class T>
void _parallelMerge_v3(T* data, T* res, T* partnerData,
                       int blockSize, int selfID, int partnerID) {
    if (selfID == partnerID)
        return;
    if (selfID < partnerID) {
        // forward merge: keep the blockSize smallest elements
        const T* a = data;
        const T* b = partnerData;
        int takenA = 0;
        int takenB = 0;
        for (int i = 0; i < blockSize; i++) {
            bool takeA;
            if (takenA < blockSize && takenB < blockSize)
                takeA = (*a < *b);
            else if (takenA < blockSize)
                takeA = true;
            else if (takenB < blockSize)
                takeA = false;
            else
                throw "Impossible exception";  // both inputs exhausted early
            if (takeA) {
                res[i] = *a++;
                takenA++;
            } else {
                res[i] = *b++;
                takenB++;
            }
        }
        return;
    }
    // backward merge: keep the blockSize largest elements
    const T* a = data + blockSize - 1;
    const T* b = partnerData + blockSize - 1;
    int takenA = 0;
    int takenB = 0;
    for (int i = blockSize - 1; i >= 0; i--) {
        bool takeA;
        if (takenA < blockSize && takenB < blockSize)
            takeA = (*a > *b);
        else if (takenA < blockSize)
            takeA = true;
        else if (takenB < blockSize)
            takeA = false;
        else
            throw "Impossible exception";  // both inputs exhausted early
        if (takeA) {
            res[i] = *a--;
            takenA++;
        } else {
            res[i] = *b--;
            takenB++;
        }
    }
}
// 2^degree = number of threads in use to sort.
// Version 3: per-thread pointer tables. from[t]/to[t] point at thread t's
// current and scratch block (one inside `data`, one inside `res`); each
// merge step writes into to[self], then the thread swaps only its own pair,
// so no global buffer flip is needed.
template<class T>
void radixBatchersMergeSort_v3(std::vector<T>* data, int degree) {
    if (degree == 0) { // numThreads = 1
        radixSort<T>(data, 0, data->size());
        return;
    }
    int numThreads = 1 << degree;
    // fewer elements than threads: a single sequential sort is enough
    if (numThreads > static_cast<int>(data->size())) {
        radixSort<T>(data, 0, data->size());
        return;
    }
    size_t oldSize = data->size();
    bool isResized = false;
    if (oldSize % numThreads != 0) {
        // pad so each thread owns an equal-sized block; ~0 is intended to
        // sort last -- assumes T is an unsigned integer type (for signed T
        // this is -1 and would sort first) -- TODO confirm
        data->resize(oldSize + (numThreads - oldSize % numThreads), ~0);
        isResized = true;
    }
    int size = data->size();
    int blockSize = size / numThreads;
    std::vector<T> res(size);
    // NOTE(review): raw new[]/delete[] -- these leak if an exception (e.g.
    // the merge's throw) escapes; std::vector would be safer. Left as-is.
    T** from = new T * [numThreads];
    T** to = new T * [numThreads];
    for (int i = 0; i < numThreads; i++) {
        from[i] = data->data() + blockSize * i;
        to[i] = res.data() + blockSize * i;
    }
    T* mainData = data->data();
    #pragma omp parallel num_threads(numThreads)
    {
        int selfID = omp_get_thread_num();
        // sort step
        radixSort<T>(data, selfID * blockSize, blockSize);
        #pragma omp barrier
        // merge step
        // Batcher's merge network realisation
        int partnerID;
        for (int stage = 1; stage <= degree; stage++) {
            for (int step = 1; step <= stage; step++) {
                partnerID = partner(selfID, stage, step);
                _parallelMerge_v3(from[selfID], to[selfID], from[partnerID], blockSize, selfID, partnerID);
                // barrier: everyone must finish reading from[partnerID]
                // before any pointer pair is swapped
                #pragma omp barrier
                // idle node keeps its current buffer (to[self] was not
                // written), so only merged nodes swap
                if (selfID != partnerID)
                    std::swap(from[selfID], to[selfID]);
                #pragma omp barrier
                // last step of the last stage (step runs 1..stage, so at
                // stage == degree the final step is step == degree):
                // copy each thread's current block back into `data`
                if (stage == degree && step == degree) {
                    for (int i = 0; i < blockSize; i++)
                        mainData[selfID * blockSize + i] = from[selfID][i];
                }
            }
        }
        #pragma omp barrier
        {}
    }
    delete[] from;
    delete[] to;
    if (isResized)
        data->resize(oldSize);
}
// </OMP REALISATION>
#endif // MODULES_TASK_2_IVANOV_A_RADIX_BATCHERS_MERGESORT_OMP_RBMS_H_
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2053
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(image,token) \
{ \
(void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(Image *,MVGInfo *,const char *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    Allocate a DrawInfo structure and initialize it to default values via
    GetDrawInfo() with a NULL ImageInfo. No NULL check here: presumably
    AcquireCriticalMemory() handles allocation failure fatally -- confirm
    against its definition. Caller owns the returned structure.
  */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  /*
    Start from a default-initialized DrawInfo; a NULL draw_info therefore
    yields a fresh structure with default values.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  /*
    Deep-copy string members; scalar and struct-valued members are copied
    by assignment below.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    The deprecated tile image, when present and no fill pattern is set,
    becomes the clone's fill pattern.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      &draw_info->fill_pattern->exception);
  else
    if (draw_info->tile != (Image *) NULL)
      clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
        &draw_info->tile->exception);
  clone_info->tile=NewImageList(); /* tile is deprecated */
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,&draw_info->stroke_pattern->exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a zero-terminated array of doubles: count its
        entries, then allocate 2*x+2 zeroed slots (extra slack) and copy
        the x entries plus the terminating zero.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /*
          NOTE(review): message reused from the dash-pattern path above;
          looks like a copy/paste -- confirm intended wording.
        */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_opacity=draw_info->fill_opacity;
  clone_info->stroke_opacity=draw_info->stroke_opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,&draw_info->clipping_mask->exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,&draw_info->composite_mask->exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPathToPolygon() returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release a PolygonInfo: free every edge's point array, the edge table,
  then the structure itself. Always returns NULL for pointer hygiene.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        {
          if (polygon_info->edges[i].points != (PointInfo *) NULL)
            polygon_info->edges[i].points=(PointInfo *)
              RelinquishMagickMemory(polygon_info->edges[i].points);
        }
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort comparator for EdgeInfo: orders edges by first-point y, then
  first-point x, then by the cross product of the initial segments (slope
  ordering for a right-handed coordinate system), then second-point y and x.
  Each key compares the same difference the original macro computed.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  register const PointInfo
    *p,
    *q;

  double
    delta;

  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  delta=p[0].y-q[0].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[0].x-q[0].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].y-q[1].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].x-q[1].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Emit the polygon's edge table to the draw-event log: per edge, its
  direction, ghostline flag, bounding box, and every point.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    /* direction: nonzero means the edge runs downward in image space */
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  Reverse the point array in place by swapping from both ends toward the
  middle. A middle element (odd count) stays put.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  for (head=0, tail=(ssize_t) number_points-1; head < tail; head++, tail--)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
  }
}
/*
  Convert a path to the more efficient sorted rendering form: split the
  path into y-monotone edges (runs of points that only go down or only go
  up), trim each edge's point buffer to size, and sort the edges for the
  scanline renderer. Returns NULL (after cleanup) on allocation failure.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: close out the current edge (if it has at least two
          points) and start a new point run.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* store every edge top-to-bottom: upward runs get reversed */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: a change of vertical direction starts a new monotone edge.
      Ties in y are broken by x so horizontal runs get a direction too.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the last point of the finished edge seeds the next one.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        /* the old buffer now belongs to the stored edge: allocate a new one
           (dead `points=NULL` store removed; it was immediately overwritten) */
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Close out the final edge; a dangling single point is discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  polygon_info->number_edges=edge;  /* duplicated assignment removed */
  /*
    Trim the edge table and each edge's point buffer to their final sizes,
    then sort the edges for scanline rendering.
  */
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Emit each path vertex with a human-readable label for its PathInfoCode
  to the draw-event log; stops at the EndCode sentinel.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *label;

    switch (p->code)
    {
      case GhostlineCode: label="moveto ghostline"; break;
      case OpenCode: label="moveto open"; break;
      case MoveToCode: label="moveto"; break;
      case LineToCode: label="lineto"; break;
      default: label="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,label);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath (moveto target) */
    q;  /* most recently emitted point, for duplicate elimination */

  register ssize_t
    i,
    n;  /* number of path elements emitted so far */

  ssize_t
    coordinates,  /* input points remaining in the current subpath */
    start;        /* index in path_info of the current subpath's moveto */

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* these primitives have no vector-path representation */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Count the input points.  Worst case each point emits up to 3 path
    elements (the point itself plus, at the end of an open subpath, a
    ghostline moveto and a closing lineto), plus 1 for the EndCode
    sentinel -- hence the 3*i+1 allocation.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  /* sentinel values: no previous point has been seen yet */
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: retag the
      subpath's moveto as OpenCode and append a ghostline back to the
      subpath start so the fill rule still sees a closed contour.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  /* terminate the path with the EndCode sentinel */
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Shrink the buffer to the number of elements actually emitted.
    NOTE(review): the result may be NULL on resize failure -- callers are
    expected to handle a NULL return; confirm ResizeQuantumMemory frees the
    original allocation on failure.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every resource owned by the DrawInfo, invalidate its
    signature, then free the structure itself.  Always returns NULL so
    callers can write: draw_info=DestroyDrawInfo(draw_info);
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* owned strings */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  /* owned images (tiles and patterns) */
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  /* font and rendering attribute strings */
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  /* raw memory buffers */
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  /* clipping and compositing masks */
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* poison the signature to catch use-after-free of this structure */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
/*
  For destination scanline y, narrow the candidate x-interval [edge->x1,
  edge->x2] to the span whose preimage under the (forward) affine transform
  lies inside the source image.  The x1/x2 fields of the result carry the
  clipped interval; a result with x2 < x1 marks an empty span.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the source column coordinate at x=0 for this scanline */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* x where the preimage crosses source column 0 and image->columns */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative scale: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: column coordinate is constant along the scanline */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* out of range: collapse the span to mark it empty */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): this branch assigns edge->x2 where the columns
            branch above assigns edge->x1 -- the asymmetry looks
            suspicious (this one does not necessarily yield x2 < x1);
            verify against upstream before changing.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  Invert a 2x3 affine transform: the reciprocal of the 2x2 determinant
  scales the adjugate of the linear part, and the translation is then
  mapped through the inverted linear part.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse;

  double
    scale;

  /* PerceptibleReciprocal guards against a (near-)singular matrix */
  scale=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse.sx=scale*affine->sy;
  inverse.rx=scale*(-affine->rx);
  inverse.ry=scale*(-affine->ry);
  inverse.sy=scale*affine->sx;
  inverse.tx=(-affine->tx)*inverse.sx-affine->ty*
    inverse.ry;
  inverse.ty=(-affine->tx)*inverse.rx-affine->ty*
    inverse.sy;
  return(inverse);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    extent[4],  /* the four source corners, forward-transformed */
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* forward-map the four corners of the source image */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  /* axis-aligned bounds of the transformed corners */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* clip the bounding box against the destination image */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  /* sampling uses the inverse transform: destination -> source */
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;
    /* narrow this scanline to the span whose preimage is inside source */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* empty span: nothing to composite on this row */
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /* map destination (x,y) back into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      /* composite the interpolated source pixel over the destination */
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  /*
    Debug aid: outline each polygon edge's bounding box (color keyed on
    the edge direction flag) and the overall bounds in blue.
  */
  double
    mid;  /* half the rendered stroke width, in pixels */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* transparent fill: only the rectangle outlines are drawn */
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default to 96 DPI unless a density is specified */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;  /* single value: square resolution */
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounding boxes */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* pad by the stroke half-width, then clamp to the image extent */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red for nonzero-direction edges, green otherwise */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      /* an early break above means some edge failed to draw */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* overall bounding rectangle in blue */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  /*
    Look up the MVG clip path registered under this id, render it into a
    mask image, and install that mask as the image clip mask.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);  /* no clip path registered under this id */
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,
    &image->exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageClipMask(image,mask);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* the mask matches the target image's geometry */
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /*
    NOTE(review): the next three statuses are each overwritten by the
    following assignment -- only SetImageBackgroundColor's result
    survives; presumably intentional best-effort, but confirm.
  */
  status=SetImageClipMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* render the clip path in opaque white fill on the transparent mask */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* reduce the rendered mask to its alpha channel */
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  if (draw_info->compliance != SVGCompliance)
    status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  /* caller owns the returned image; NULL on failure */
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the MVG mask path into a grayscale image mask sized to match the
  target image.  The caller owns the returned image; NULL is returned on
  failure.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone;

  Image
    *mask_image;

  MagickStatusType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* the mask matches the target image's geometry */
  mask_image=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(mask_image,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=SetImageMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &mask_image->background_color,exception);
  mask_image->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(mask_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* render the mask path with opaque white fill and no stroke */
  clone=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone->stroke,
    exception);
  clone->stroke_width=0.0;
  clone->opacity=OpaqueOpacity;
  status=RenderMVGContent(mask_image,clone,0);
  clone=DestroyDrawInfo(clone);
  /* keep only the alpha channel, inverted, as the grayscale mask */
  status&=SeparateImageChannel(mask_image,TrueAlphaChannel);
  status&=NegateImage(mask_image,MagickFalse);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  /*
    Stroke the polyline in primitive_info as a sequence of dashes,
    walking the dash_pattern array (alternating dash/gap lengths,
    terminated by 0.0) and honoring dash_offset.
  */
  double
    length,          /* remaining length of the current dash/gap */
    maximum_length,  /* length of the current input segment */
    offset,          /* dash_offset still to be consumed */
    scale,           /* affine expansion factor applied to pattern lengths */
    total_length;    /* distance covered so far along the current segment */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;  /* scratch polyline for one dash at a time */

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* number of points accumulated in dash_polygon */
    n;  /* current index into dash_pattern */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* 2*vertices+32 leaves headroom for the per-dash start/end points */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking pattern entries until it is spent.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;  /* 0.0 terminates the dash pattern */
    /*
      NOTE(review): the -0.5/+0.5 bias on the first vs later entries is
      presumably a rounding compensation -- confirm against upstream.
    */
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;  /* skip implausibly long segments */
    if (fabs(length) < MagickEpsilon)
      {
        /* current dash/gap exhausted: advance to the next pattern entry */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;  /* wrap at the 0.0 terminator */
        length=scale*draw_info->dash_pattern[n];
      }
    /*
      Walk the current segment, emitting one stroked polyline per dash.
      Odd pattern indices are gaps (record the next dash's start point);
      even indices are dashes (append the end point and stroke).
    */
    for (total_length=0.0;
         (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* end of a gap: the next dash starts here */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* end of a dash: close out the polyline and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;  /* guard against overrunning the scratch buffer */
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed remainder of the dash into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* mid-gap: nothing to record */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* flush a trailing partial dash, if any */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
/*
  Compute the gradient stop offset for pixel (x,y).  For linear gradients
  this is the scalar projection of the pixel onto the gradient vector
  (un-normalized; the caller divides by the vector length).  For radial
  gradients it is the (possibly ellipse-normalized) distance from the
  gradient center.  Unknown gradient types yield 0.0.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        u,  /* gradient direction vector */
        w;  /* pixel position relative to the vector origin */

      vector=(&gradient->gradient_vector);
      u.x=vector->x2-vector->x1;
      u.y=vector->y2-vector->y1;
      w.x=(double) x-vector->x1;
      w.y=(double) y-vector->y1;
      length=sqrt(w.x*w.x+w.y*w.y);
      /* gamma = 1/(|u|*|w|), guarded against zero-length vectors */
      gamma=sqrt(u.x*u.x+u.y*u.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=u.x*w.x+u.y*w.y;
      offset=gamma*scale*length;
      return(offset);
    }
  if (gradient->type == RadialGradient)
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread: plain Euclidean distance from the center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the ellipse frame and normalize by the radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.  The gradient definition
    (type, spread method, stops, bounding box) comes from
    draw_info->gradient; each pixel inside the bounding box is composited
    with the interpolated stop color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  /*
    Length of the gradient vector, used to normalize linear-gradient offsets
    into [0,1].  A zero length degenerates safely below through
    PerceptibleReciprocal().
  */
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): bounding_box.height/width are used as exclusive end
    coordinates here (the loops start at .y/.x), not as extents.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    /*
      Seed the stop-color offset for this row at x=0; it is recomputed
      per-pixel inside the spread switch except at the gradient origin.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /*
            Find the first stop past the offset; i-1 and i bracket it.
          */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Linearly interpolate between the two bracketing stops.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the gradient on every other repetition so
            it ping-pongs instead of restarting.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /*
            Repeat spread: tile the gradient, restarting at each period
            (`length' for linear, `radius' for radial); antialias the seam
            when the pixel lies within one unit of the period boundary.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam, blend first and last stops weighted by the
                      distance to the period boundary.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives: ensure the
    primitive-info buffer can hold `pad' additional entries (plus padding).
    Returns MagickTrue when the buffer is large enough, growing it if
    needed.  On overflow or allocation failure it throws a
    ResourceLimitError, replaces the buffer with a minimal zeroed
    allocation so callers can unwind safely, and returns MagickFalse.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1;
  quantum=sizeof(**mvg_info->primitive_info);
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  /*
    Reject requests whose byte count would overflow size_t before casting
    `extent' to an integer; an unchecked cast of a huge double is undefined
    and a wrapped allocation size would under-allocate the buffer.
  */
  if (extent < ((double) SSIZE_MAX/quantum))
    {
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /*
            Mark the newly acquired slots undefined so stale memory is never
            interpreted as primitives.
          */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed (or the request would overflow); allocate a minimal
    primitive buffer to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  double
    value;

  /*
    Parse a locale-aware double from `string'.  NaN and magnitudes outside
    the ssize_t-safe range collapse to 0.0 so downstream geometry arithmetic
    stays bounded.  InterpretLocaleValue() advances *sentinal past the
    parsed characters in every case.
  */
  value=InterpretLocaleValue(string,sentinal);
  if (IsNaN(value) != 0)
    return(0.0);
  if ((value > ((double) SSIZE_MAX-512.0)) ||
      (value < -((double) SSIZE_MAX-512.0)))
    return(0.0);
  return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: order macro names lexicographically by their
    byte values, per strcmp() semantics.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Each
    `push ... "name"' / matching `pop' pair contributes one splay-tree entry
    mapping the quoted name to the MVG text between push and pop.  The tree
    owns copies of both key and value (freed via RelinquishMagickMemory);
    caller destroys the tree.  Returns NULL when `primitive' is NULL.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    Scratch buffers sized to the whole primitive string so any macro body or
    token fits.
  */
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              n tracks push/pop nesting depth; the macro body ends at the
              pop that balances this push.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* rewind to just before the "pop" keyword */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    A string qualifies as a point when it parses as a number: either the
    parse consumed characters (end != point) or it yielded a value that is
    not effectively zero.
  */
  value=GetDrawValue(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate, open (non-closed-subpath) primitive at the
    given location.  Always succeeds.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
*next_token,
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (status == MagickFalse)
return(MagickFalse);
}
primitive=(char *) NULL;
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=(&image->exception);
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MaxTextExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=QueryColorDatabase("#000000",&start_color,&image->exception);
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MaxTextExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (graphic_context[n]->fill_opacity != OpaqueOpacity)
graphic_context[n]->fill.opacity=ClampToQuantum(
graphic_context[n]->fill_opacity);
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_opacity*=(1.0-opacity);
else
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
if (graphic_context[n]->fill.opacity != TransparentOpacity)
graphic_context[n]->fill.opacity=(Quantum)
graphic_context[n]->fill_opacity;
else
graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,graphic_context[n]->composite_mask);
}
break;
}
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_opacity*=(1.0-opacity);
graphic_context[n]->stroke_opacity*=(1.0-opacity);
}
else
{
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
GradientType
type;
PixelPacket
stop_color;
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&stop_color,&image->exception);
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,&start_color,&stop_color);
start_color=stop_color;
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (graphic_context[n]->stroke_opacity != OpaqueOpacity)
graphic_context[n]->stroke.opacity=ClampToQuantum(
graphic_context[n]->stroke_opacity);
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_opacity*=(1.0-opacity);
else
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
if (graphic_context[n]->stroke.opacity != TransparentOpacity)
graphic_context[n]->stroke.opacity=(Quantum)
graphic_context[n]->stroke_opacity;
else
graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (108*BezierQuantum))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(image,keyword);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(image,&mvg_info,token);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryImageException(DrawError,
"NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  /*
    Render the MVG drawing primitives carried by draw_info onto image,
    starting at MVG recursion depth 0.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the rendered pattern image.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    key[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *pattern_info;

  ImageInfo
    *pattern_image_info;

  MagickBooleanType
    status;

  /*
    Look up the named pattern's MVG path and geometry artifacts on the
    image; quietly return MagickFalse if either is missing.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh, fully transparent
    canvas sized to the pattern geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  pattern_image_info=AcquireImageInfo();
  pattern_image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(pattern_image_info);
  pattern_image_info=DestroyImageInfo(pattern_image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the pattern's MVG content through a cloned draw context; any
    fill/stroke patterns inherited from the caller are dropped first so
    they do not recurse into this pattern, and an optional "<name>-type"
    artifact selects the gradient type.
  */
  pattern_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (pattern_info->fill_pattern != (Image *) NULL)
    pattern_info->fill_pattern=DestroyImage(pattern_info->fill_pattern);
  if (pattern_info->stroke_pattern != (Image *) NULL)
    pattern_info->stroke_pattern=DestroyImage(pattern_info->stroke_pattern);
  (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    pattern_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&pattern_info->primitive,path);
  status=RenderMVGContent(*pattern,pattern_info,0);
  pattern_info=DestroyDrawInfo(pattern_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  /*
    Release each per-thread polygon structure (NULL slots are skipped),
    then the slot array itself; always returns NULL.
  */
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
  {
    if (polygon_info[i] == (PolygonInfo *) NULL)
      continue;
    polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  /*
    Build one PolygonInfo per worker thread: convert the primitives to
    polygon form once (slot 0), then deep-copy the edge table into every
    other slot so each thread can mutate its own copy (scanline/highwater
    caching, edge destruction) without locking.  Returns NULL on any
    allocation failure, after releasing all partially built state.
  */
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(draw_info,primitive_info,exception);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  polygon_info[0]=ConvertPathToPolygon(path_info,exception);
  if (polygon_info[0] == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonThreadSet(polygon_info));
    }
  for (i=1; i < (ssize_t) number_threads; i++)
  {
    EdgeInfo
      *edge_info;

    register ssize_t
      j;

    polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
      sizeof(*polygon_info[i]));
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    /*
      Keep number_edges at 0 until the clone is complete so that a failure
      part-way through leaves a state DestroyPolygonThreadSet() can safely
      tear down.
    */
    polygon_info[i]->number_edges=0;
    edge_info=polygon_info[0]->edges;
    polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
      polygon_info[0]->number_edges,sizeof(*edge_info));
    if (polygon_info[i]->edges == (EdgeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    (void) memcpy(polygon_info[i]->edges,edge_info,
      polygon_info[0]->number_edges*sizeof(*edge_info));
    /*
      The memcpy above copied slot 0's point-array pointers; clear them all
      before allocating fresh arrays.  Bound the loop by slot 0's edge
      count -- this slot's number_edges is still 0 here, so the previous
      bound made this loop a no-op and a failure below would double-free
      the aliased point arrays during cleanup.
    */
    for (j=0; j < (ssize_t) polygon_info[0]->number_edges; j++)
      polygon_info[i]->edges[j].points=(PointInfo *) NULL;
    polygon_info[i]->number_edges=polygon_info[0]->number_edges;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
    {
      edge_info=polygon_info[0]->edges+j;
      /*
        Size the point array by the point type, not the edge type: the old
        sizeof(*edge_info) over-allocated each array by the (larger)
        EdgeInfo stride.
      */
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info->points));
      if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(DestroyPolygonThreadSet(polygon_info));
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  /*
    Remove edge `edge` from the polygon's edge table: release its point
    array, then close the gap by shifting the trailing edges down one
    slot.  Returns the new edge count.
  */
  size_t
    trailing;

  assert(edge < (ssize_t) polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  trailing=polygon_info->number_edges-(size_t) edge;
  if (trailing > 0)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      trailing*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
  GetOpacityPixel() returns the fill opacity of the polygon at integer pixel
  location (x,y) and stores the stroke opacity in *stroke_opacity.  `mid' is
  half the scaled stroke width.  Distances below are kept squared until a
  sqrt() is unavoidable.  NOTE(review): the routine mutates polygon_info
  (edge culling via DestroyEdge, scanline/highwater caching), so each render
  thread must own its own PolygonInfo — confirm against the caller.
*/
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edge starts below the scanline (presumably edges are sorted by y1,
       so every later edge does too — TODO confirm against the builder) */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge ends above the scanline: it can never contribute again, so
           drop it.  NOTE(review): DestroyEdge() shifts the following edges
           down one slot, yet the loop still advances j and p, skipping the
           edge that moved into slot j for this scanline — confirm intended. */
        (void) DestroyEdge(polygon_info,j);
        continue;
      }
    /* horizontally outside this edge's (stroke-expanded) bounds */
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume at the cached highwater index for this edge */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first hit on a new scanline: remember where to resume */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge (squared; beta is the
        projection of (x,y) onto the segment q..q+1).
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* closest to segment start */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* closest to segment end */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* ghostlines (e.g. implicit closing segments) take no stroke */
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  /* partial coverage: fall off with distance past the
                     stroke's inner radius */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          /* beta was not computed by the stroke branch above; derive it */
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies entirely left of x: it crosses the ray once */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    /* cross product decides which side of the segment (x,y) falls on */
    q=p->points+i-1;
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when the winding number is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_opacity);
}
/*
  DrawPolygonPrimitive() rasterizes the given primitive onto the image,
  computing per-pixel fill and stroke opacity via GetOpacityPixel().  One
  PolygonInfo per thread is acquired so scanlines can render in parallel.
  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  const char
    *artifact;

  double
    mid;

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info,
    &image->exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* flood-style methods fill the interior even when drawing the outline */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid: half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0]);
  /* union of all edge bounds */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad for the stroke width plus one pixel of antialiasing */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render rectangle to the image */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* NOTE(review): coordinates == 1 already returned above (<= 1 guard), so
     this branch is effectively triggered by number_edges == 0 — confirm */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
      stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
        stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel containing the primitive point is set */
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            (void) GetFillColor(draw_info,x-start_x,y-start_y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
  stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
    stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      /* each thread uses its own PolygonInfo (indexed by OpenMP thread id)
         because GetOpacityPixel mutates it */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,x,y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialiasing: threshold coverage to all-or-nothing */
          fill_opacity=fill_opacity > 0.5 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.5 ? 1.0 : 0.0;
        }
      (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
      /* convert coverage to legacy opacity (QuantumRange == transparent) */
      fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
        (MagickRealType) q->opacity,q);
      (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
      stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
        stroke_color.opacity));
      MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
        (MagickRealType) q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  LogPrimitiveInfo() traces the given primitive to the debug log: point-style
  primitives are logged with their method name, text/image primitives with
  their location, and path-style primitives as a coordinate list annotated
  with subpath begin/end markers.
  Fix: the loop body assigned `point=primitive_info[i].point;' twice per
  iteration (before and after the subpath-begin block) with no intervening
  write; the redundant second assignment is removed.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case MattePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: p tracks the first point of the current subpath,
    q the previously logged point (seeded off-canvas so the first point is
    never flagged as a duplicate).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath exhausted: report whether it closed back on its first point */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  exception=(&image->exception);
  status=MagickTrue;
  /* a non-gray fill or stroke forces the image out of gray colorspace */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace);
  /* install clip/composite masks for the duration of the draw (removed at
     the bottom of this routine) */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageClipMask(image,draw_info->clipping_mask);
      status&=SetImageMask(image,draw_info->composite_mask);
    }
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case ColorPrimitive:
    {
      /* paint pixels with the fill color per the requested method */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* recolor the single pixel at (x,y) */
          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* recolor every pixel similar to the pixel at (x,y) */
          PixelPacket
            target;

          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* flood fill from (x,y); FillToBorder stops at the border color */
          MagickPixelPacket
            target;

          status&=GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          /* recolor every pixel of the image */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case MattePrimitive:
    {
      /* like ColorPrimitive, but only the opacity channel is written */
      if (image->matte == MagickFalse)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            pixel;

          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,&pixel);
          SetPixelOpacity(q,pixel.opacity);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelPacket
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          status&=GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          PixelPacket
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /* read the referenced image (inline data: URI or filename), then
         composite it at (x,y), resizing per primitive_info[1] if needed */
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          &image->exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            /* SetImageInfo may have stripped a URL/mpr prefix; restore the
               original reference for those schemes */
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,primitive_info->text,
                MagickPathExtent);
            if (*clone_info->filename != '\0')
              composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* only the first image of a sequence is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,geometry);
        }
      if (composite_image->matte == MagickFalse)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
      if (draw_info->opacity != OpaqueOpacity)
        status&=SetImageOpacity(composite_image,draw_info->opacity);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        &image->exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* over-composites go through the affine path so the current affine
         transform is honored */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine);
      else
        status&=CompositeImage(image,draw_info->compose,composite_image,
          geometry.x,geometry.y);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /* composite a single fill-colored pixel at (x,y), if in bounds */
      PixelPacket
        fill_color;

      PixelPacket
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color);
      MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
        (MagickRealType) q->opacity,q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /* delegate to AnnotateImage() with the primitive's text and position */
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /* path-style primitives: polygon, line, ellipse, bezier, ... */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          /*
            Draw dash polygon.
          */
          /* first fill the shape with stroking disabled, then overlay the
             dashed stroke */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          /* a path whose last point coincides with its first is closed */
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          /* round cap/join (or multiple subpaths) render correctly with the
             plain polygon rasterizer; otherwise trace an explicit stroke */
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* remove the masks installed at the top of this routine */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageClipMask(image,(Image *) NULL);
      status&=SetImageMask(image,(Image *) NULL);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  DrawRoundLinecap() synthesizes a degenerate four-point polygon (offset by
  a few epsilons around the endpoint of `primitive_info') and renders it
  with DrawPolygonPrimitive() to paint the line cap.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  const double
    offset = 2.0*MagickEpsilon;

  PrimitiveInfo
    linecap[5];

  register ssize_t
    j;

  for (j=0; j < 4; j++)
    linecap[j]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=offset;
  linecap[2].point.x+=offset;
  linecap[2].point.y+=offset;
  linecap[3].point.y+=offset;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /* the stroke is rendered by FILLING the traced stroke outline: the clone
     uses the stroke color/pattern as its fill and disables its own stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* one pass per subpath; p advances by each subpath's coordinate count */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,&image->exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q: last point of the subpath; caps go on both open ends */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset *affine_matrix to the identity transform: every coefficient is
    zeroed, then the diagonal scale factors are set to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* pointsize falls back to 12.0 when the image info leaves it near zero */
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Apply image options that override the defaults above.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept either a symbolic weight ("bold") or a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() evaluates the binomial coefficient "n choose k" as a double,
  using the cancellation n!/(k!(n-k)!) = (prod of k+1..n)/(n-k)!.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  register ssize_t
    index;

  result=1.0;
  for (index=k+1; index <= n; index++)
    result*=index;
  for (index=1; index <= (n-k); index++)
    result/=index;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an arc between two points as a segment of the ellipse
  whose center is the midpoint of the chord and whose radii reach each
  endpoint; the actual tracing is delegated to TraceEllipse().
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    ellipse_center,
    ellipse_radii;

  ellipse_center.x=0.5*(end.x+start.x);
  ellipse_center.y=0.5*(end.y+start.y);
  ellipse_radii.x=fabs(ellipse_center.x-start.x);
  ellipse_radii.y=fabs(ellipse_center.y-start.y);
  return(TraceEllipse(mvg_info,ellipse_center,ellipse_radii,degrees));
}
/*
  TraceArcPath() renders an SVG-style elliptical arc (the 'A'/'a' path
  command) from start to end as a chain of cubic Bezier segments, one per
  quarter turn.  arc holds the requested radii, angle the x-axis rotation
  in degrees, and large_arc/sweep the SVG arc-selection flags.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate arcs: coincident endpoints collapse to a point; a vanishing
    radius collapses to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /*
    Rotate the half-chord into the ellipse's own coordinate frame.
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /*
        Radii too small to span the chord: scale them up uniformly until
        the arc is realizable (per SVG arc implementation notes).
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Map both endpoints onto the unit circle of the (rotated) ellipse.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Solve for the arc center; the sign of factor selects which of the two
    candidate centers honors the large_arc and sweep flags.
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    alpha is the start angle, theta the signed sweep, normalized to match
    the requested sweep direction.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    One cubic Bezier approximates at most a quarter circle accurately.
  */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  p=primitive_info;
  status=MagickTrue;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      gamma is the Bezier handle length for this segment's half-angle beta.
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Store the 4 control points (scaled back out of the unit circle and
      un-rotated) for this Bezier segment; the first continues from the
      previous segment's last point.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* Land exactly on the requested endpoint on the final segment. */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /*
      TraceBezier() may reallocate the primitive buffer; refresh p from the
      double-indirected base pointer before advancing.
    */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Rewind to the start of the arc and stamp the whole run as one primitive.
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() expands the number_coordinates control points already stored
  at mvg_info->offset into a polyline approximation of the Bezier curve of
  degree number_coordinates-1, evaluated in the Bernstein basis.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sampling density with the extent of the control polygon; bail
    out if any pairwise span is too large to index safely.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  /* Cap the samples-per-control-point at BezierQuantum. */
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent() may reallocate; refresh primitive_info. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* Binomial coefficients of the Bernstein basis polynomials. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    /*
      Evaluate the curve at parameter `weight`; alpha walks through the
      Bernstein terms incrementally via the ratio weight/(1-weight).
    */
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* Terminate the polyline exactly at the final control point. */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type to every generated vertex. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() traces a circle centered at `start` whose radius is the
  distance to the edge point `end`, as an ellipse with equal radii swept a
  full 360 degrees.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    distance;

  PointInfo
    degrees,
    radii;

  dx=end.x-start.x;
  dy=end.y-start.y;
  distance=hypot((double) dx,(double) dy);
  radii.x=(double) distance;
  radii.y=(double) distance;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
/*
  TraceEllipse() approximates an elliptical arc -- given its center, radii,
  and start/end angles in degrees (arc.x/arc.y) -- as a polyline sampled at
  an angular step that scales with the larger radius.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* A degenerate radius yields nothing to draw. */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* Use a finer angular step for larger radii, capped at pi/8 radians. */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* Normalize the terminal angle so the sweep is non-negative. */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  /* Refuse pathological sweeps that would exhaust the primitive buffer. */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (108.0*BezierQuantum))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent() may reallocate; refresh primitive_info. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Close the sweep exactly at the terminal angle. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Mark the subpath closed when the first and last vertices coincide. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type to every generated vertex. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() emits a two-vertex line segment into primitive_info; when the
  endpoints coincide (within MagickEpsilon) it degrades the segment to a
  single point primitive instead.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  register PrimitiveInfo
    *q;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      /* Degenerate segment: collapse to a point. */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  q=primitive_info+1;
  if (TracePoint(q,end) == MagickFalse)
    return(MagickFalse);
  q->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG path string (moveto/lineto/curveto/arc/closepath
  commands) and appends the traced vertices to mvg_info's primitive array.
  Uppercase command letters take absolute coordinates, lowercase relative
  ones.  Returns the total number of coordinates generated, or -1 on error.
*/
static ssize_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
  char
    *next_token,
    token[MaxTextExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickStatusType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-axis-rotation large-arc-flag sweep-flag
          x y, repeated while further points follow.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          /* TraceArcPath() may reallocate the buffer; refresh q. */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bezier curve: two control points plus an endpoint per
          segment.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: ends the current subpath (if any) and starts a new one;
          extra point pairs are implicit line-tos.
        */
        if (mvg_info->offset != subpath_offset)
          {
            /* Finalize the previous subpath's coordinate count. */
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* Remember the subpath origin for a later 'z' close. */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bezier curve: one control point plus an endpoint per
          segment.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bezier curve: the first control point is the
          reflection of the previous segment's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding curve to reflect: degenerate to current point. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bezier curve: the control point is the
          reflection of the previous segment's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* No preceding curve to reflect: degenerate to current point. */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath origin and finalize the
          subpath as closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* Unrecognized path command. */
        ThrowPointExpectedException(image,token);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /* Finalize the last (possibly unclosed) subpath. */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    /* Multiple closed subpaths fill with the even-odd style method. */
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}
/*
  TraceRectangle() traces the axis-aligned rectangle with opposite corners
  `start` and `end` as a closed five-vertex polygon:
  start -> (start.x,end.y) -> end -> (end.x,start.y) -> start.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corner;

  register PrimitiveInfo
    *q;

  register ssize_t
    n;

  q=primitive_info;
  if (TracePoint(q,start) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  corner.x=start.x;
  corner.y=end.y;
  if (TracePoint(q,corner) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  if (TracePoint(q,end) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  corner.x=end.x;
  corner.y=start.y;
  if (TracePoint(q,corner) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  if (TracePoint(q,start) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type to every generated vertex. */
  for (n=0; n < (ssize_t) primitive_info->coordinates; n++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with elliptically rounded
  corners by stitching together four quarter-ellipse sweeps and closing the
  path back to its first vertex.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* Degenerate rectangle: emit nothing. */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Clamp the corner radii so opposing corner arcs cannot overlap. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* First corner ellipse, swept 270..360 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Second corner ellipse, swept 0..90 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Third corner ellipse, swept 90..180 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Fourth corner ellipse, swept 180..270 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* Close the path back to the first traced vertex. */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Rewind and stamp the whole run as one closed primitive. */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both end vertices of a stroked path outward
  by `offset` along the local path direction, producing square line caps.
  The vertex list is modified in place.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* Find the first vertex not coincident with the starting vertex. */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /*
    NOTE(review): if every vertex coincides, distance is 0.0 and the
    divisions below produce inf/NaN coordinates -- confirm callers never
    pass a fully degenerate path.
  */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* Find the last vertex not coincident with the terminal vertex. */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}
/*
TraceStrokePolygon() traces the outline of the stroked path primitive_info
using the stroke width, line join (bevel/miter/round), and line cap styles
from draw_info.  Two point lists are accumulated as the path is walked:
stroke_p holds one side of the stroke and stroke_q the other; they are
finally merged (q side reversed) into one closed PrimitiveInfo polygon that
the caller must free.  Returns NULL on memory exhaustion.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
/*
CheckPathExtent(pad_p,pad_q) grows stroke_p/stroke_q so at least
pad_p/pad_q more points fit.  On allocation failure, on extent overflow
(~extent < pad), or on a pad beyond MaxBezierCoordinates, it releases every
buffer, raises ResourceLimitError, and returns NULL from the enclosing
function.
*/
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
(void) ThrowMagickException(exception,GetMagickModule(), \
ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
return((PrimitiveInfo *) NULL); \
} \
}
/* Paired per-side scalars: .p for the previous segment, .q for the next. */
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
/*
A path is closed when its first and last vertices coincide.
*/
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
/*
For round/miter joins on a closed path, append the second vertex so the
closing join is computed like any interior join.
*/
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
/*
Initial capacity for each side; grown on demand by CheckPathExtent().
*/
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
/*
Segment slope; +/-1/MagickEpsilon acts as an "infinite" sentinel for
near-vertical segments (and for inverse_slope on near-horizontal ones).
*/
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
/*
mid = half the stroke width in user space; miterlimit is pre-squared so it
can be compared directly against squared distances below.
*/
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
/*
Perpendicular offset from the path centerline to each side of the stroke.
*/
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
/*
NOTE(review): the first branch writes offset.x*inverse_slope.p where the
second writes offset.y; the two are equal by construction since
offset.y = offset.x*inverse_slope.p.
*/
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
/*
p and q count the points emitted on each side.  Note the counters appear
swapped in the two statements below; this is benign because both are zero
here -- each array still receives its first box corner.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
/*
Skip segments whose squared length (dx^2+dy^2) is below 0.25.
*/
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
/*
box_[pq][4] is the intersection of the previous and current offset edges
(the miter point); for (near-)parallel segments reuse the previous edge
endpoint instead.
*/
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
/*
The cross product's sign distinguishes left from right turns; the join
geometry is emitted on the outer side accordingly.
*/
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
/*
Tessellate the outer arc of the round join about the path vertex.
*/
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.q-
theta.p)/(2.0*sqrt((double) (1.0/mid))))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
/*
Tessellate the outer arc of the round join on the p side.
*/
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
theta.q)/(2.0*sqrt((double) (1.0/mid))))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
/*
Advance: the current segment becomes the previous one.
*/
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
/*
Close each side with the final edge endpoints.
*/
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
/*
NOTE(review): closed_path (MagickBooleanType) is used arithmetically below;
assumes MagickTrue == 1 -- confirm against MagickCore's type definitions.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
/*
Emit the p side forward, then the q side in reverse, finishing back at the
start point so the polygon is closed.
*/
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
AnySCF.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "TinyDFT.h"
void save_mat_to_file(
    const char *env_str, const char *format_str, const int nbf,
    const char *mol_name, const char *bas_name, const double *mat
)
{
    // Dump an nbf*nbf dense matrix of doubles to a binary file when the
    // controlling environment value (env_str, already fetched by the caller)
    // is "1".  format_str must consume (int, char*, char*) conversions for
    // (nbf, mol_name, bas_name).  Errors are reported to stderr and the
    // function returns without aborting the program.
    if (env_str == NULL) return;
    if (atoi(env_str) != 1) return;
    char ouf_name[256];
    // snprintf instead of sprintf: never overflow ouf_name
    int len = snprintf(ouf_name, sizeof(ouf_name), format_str, nbf, mol_name, bas_name);
    if (len < 0 || (size_t) len >= sizeof(ouf_name))
    {
        fprintf(stderr, "Output file name invalid or truncated, matrix not saved\n");
        return;
    }
    FILE *ouf = fopen(ouf_name, "wb");
    if (ouf == NULL)  // fix: original dereferenced a NULL stream on open failure
    {
        fprintf(stderr, "Failed to open %s for writing\n", ouf_name);
        return;
    }
    // size_t arithmetic avoids int overflow of nbf*nbf for large basis sets
    size_t nelem  = (size_t) nbf * (size_t) nbf;
    size_t nwrite = fwrite(mat, sizeof(double), nelem, ouf);
    int close_ret = fclose(ouf);  // fclose flushes; check it for write streams
    if (nwrite != nelem || close_ret != 0)
    {
        fprintf(stderr, "Failed to write %s completely\n", ouf_name);
        return;
    }
    printf("Binary file %s output finished\n", ouf_name);
}
void TinyDFT_SCF(TinyDFT_p TinyDFT, const int max_iter, int J_op, int K_op)
{
    // Run restricted SCF iterations until the total-energy change drops
    // below TinyDFT->E_tol or max_iter iterations have been performed.
    // J_op: 0 = direct J build, 1 = density-fitting J build.
    // K_op: 0 = direct K, 1 = density-fitting K, 2 = DFT XC (exact K is
    //       also built when the functional is a hybrid GGA).
    printf("Self-Consistent Field iteration started...\n");
    printf("Nuclear repulsion energy = %.10lf\n", TinyDFT->E_nuc_rep);
    TinyDFT->iter = 0;
    TinyDFT->max_iter = max_iter;
    // Fix: E_prev/E_curr were previously uninitialized; E_delta was computed
    // from an indeterminate E_prev on the first iteration (UB) before being
    // reset in the iter==0 branch below.
    double E_prev = 0.0, E_curr = 0.0, E_delta = 19241112.0;

    // Local aliases into the TinyDFT workspace
    int nbf       = TinyDFT->nbf;
    int mat_size  = TinyDFT->mat_size;
    int xf_id     = TinyDFT->xf_id;
    int xf_family = TinyDFT->xf_family;
    double *Hcore_mat = TinyDFT->Hcore_mat;
    double *S_mat     = TinyDFT->S_mat;
    double *X_mat     = TinyDFT->X_mat;
    double *J_mat     = TinyDFT->J_mat;
    double *K_mat     = TinyDFT->K_mat;
    double *XC_mat    = TinyDFT->XC_mat;
    double *F_mat     = TinyDFT->F_mat;
    double *Cocc_mat  = TinyDFT->Cocc_mat;
    double *D_mat     = TinyDFT->D_mat;
    double *E_nuc_rep     = &TinyDFT->E_nuc_rep;
    double *E_one_elec    = &TinyDFT->E_one_elec;
    double *E_two_elec    = &TinyDFT->E_two_elec;
    double *E_HF_exchange = &TinyDFT->E_HF_exchange;
    double *E_DFT_XC      = &TinyDFT->E_DFT_XC;

    // Decode the J/K build strategy into boolean flags
    int J_direct = 0, K_direct = 0, JK_direct = 0;
    int J_denfit = 0, K_denfit = 0, K_xc = 0, xc_hybrid = 0;
    if (xf_family == FAMILY_HYB_GGA) xc_hybrid = 1;
    if (J_op == 0) J_direct = 1;
    if (J_op == 1) J_denfit = 1;
    if (K_op == 0) K_direct = 1;
    if (K_op == 1) K_denfit = 1;
    if (K_op == 2) K_xc = 1;
    if (xc_hybrid == 1)
    {
        // Hybrid functionals need exact exchange; build K the same way as J
        if (J_direct == 1) K_direct = 1;
        if (J_denfit == 1) K_denfit = 1;
    }
    JK_direct = J_direct & K_direct;
    // Fix: HF_x_coef was previously uninitialized and then read whenever
    // xc_hybrid == 1; only the B3LYP/B3LYP5 fraction (0.2) is known here,
    // so default to 0.0 for any other hybrid functional.
    double HF_x_coef = 0.0;
    if (xf_id == HYB_GGA_XC_B3LYP || xf_id == HYB_GGA_XC_B3LYP5) HF_x_coef = 0.2;

    while ((TinyDFT->iter < TinyDFT->max_iter) && (fabs(E_delta) >= TinyDFT->E_tol))
    {
        printf("--------------- Iteration %d ---------------\n", TinyDFT->iter);
        double st0, et0, st1, et1, st2;
        double J_time = 0, K_time = 0, XC_time = 0;
        st0 = get_wtime_sec();

        // Build the Fock matrix: J (Coulomb), K (exchange), and/or XC
        if (JK_direct == 1)
        {
            st1 = get_wtime_sec();
            TinyDFT_build_JKmat(TinyDFT, D_mat, J_mat, K_mat);
            st2 = get_wtime_sec();
            // J and K are built together; split the wall time evenly
            J_time = 0.5 * (st2 - st1);
            K_time = 0.5 * (st2 - st1);
        }
        if (JK_direct == 0 && J_direct == 1)
        {
            st1 = get_wtime_sec();
            TinyDFT_build_JKmat(TinyDFT, D_mat, J_mat, NULL);
            st2 = get_wtime_sec();
            J_time = st2 - st1;
        }
        if (J_denfit == 1)
        {
            st1 = get_wtime_sec();
            TinyDFT_build_JKmat_DF(TinyDFT, D_mat, Cocc_mat, J_mat, NULL);
            st2 = get_wtime_sec();
            J_time = st2 - st1;
        }
        if (JK_direct == 0 && K_direct == 1)
        {
            st1 = get_wtime_sec();
            TinyDFT_build_JKmat(TinyDFT, D_mat, NULL, K_mat);
            st2 = get_wtime_sec();
            K_time = st2 - st1;
        }
        if (K_denfit == 1)
        {
            st1 = get_wtime_sec();
            TinyDFT_build_JKmat_DF(TinyDFT, D_mat, Cocc_mat, NULL, K_mat);
            st2 = get_wtime_sec();
            K_time = st2 - st1;
        }
        if (K_xc == 1)
        {
            st1 = get_wtime_sec();
            *E_DFT_XC = TinyDFT_build_XC_mat(TinyDFT, D_mat, XC_mat);
            st2 = get_wtime_sec();
            XC_time = st2 - st1;
        }

        // Assemble F from the pieces appropriate to the chosen method
        if (K_op == 0 || K_op == 1)
        {
            #pragma omp parallel for simd
            for (int i = 0; i < mat_size; i++)
                F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] - K_mat[i];
        }
        if (K_op == 2 && xc_hybrid == 0)
        {
            #pragma omp parallel for simd
            for (int i = 0; i < mat_size; i++)
                F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] + XC_mat[i];
        }
        if (K_op == 2 && xc_hybrid == 1)
        {
            #pragma omp parallel for simd
            for (int i = 0; i < mat_size; i++)
                F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] + XC_mat[i] - HF_x_coef * K_mat[i];
        }
        et1 = get_wtime_sec();
        printf(
            "* Build Fock matrix     : %.3lf (s), J, K, XC = %.3lf, %.3lf, %.3lf (s)\n",
            et1 - st0, J_time, K_time, XC_time
        );

        // Calculate new system energy
        st1 = get_wtime_sec();
        if (K_direct == 1 || K_denfit == 1)
        {
            TinyDFT_calc_HF_energy(
                mat_size, D_mat, Hcore_mat, J_mat, K_mat,
                E_one_elec, E_two_elec, E_HF_exchange
            );
        } else {
            TinyDFT_calc_HF_energy(
                mat_size, D_mat, Hcore_mat, J_mat, NULL,
                E_one_elec, E_two_elec, NULL
            );
        }
        E_curr = (*E_nuc_rep) + (*E_one_elec) + (*E_two_elec);
        if (K_op == 0 || K_op == 1) E_curr += (*E_HF_exchange);
        if (K_op == 2) E_curr += (*E_DFT_XC);
        if (K_op == 2 && xc_hybrid == 1) E_curr += HF_x_coef * (*E_HF_exchange);
        et1 = get_wtime_sec();
        printf("* Calculate energy      : %.3lf (s)\n", et1 - st1);
        E_delta = E_curr - E_prev;
        E_prev = E_curr;

        // On the final iteration, optionally dump matrices controlled by env vars
        if (TinyDFT->iter == max_iter - 1)
        {
            char *mol_name = TinyDFT->mol_name;
            char *bas_name = TinyDFT->bas_name;
            save_mat_to_file(getenv("OUTPUT_D"),  "D_%d_%s_%s.bin",  nbf, mol_name, bas_name, D_mat);
            save_mat_to_file(getenv("OUTPUT_J"),  "J_%d_%s_%s.bin",  nbf, mol_name, bas_name, J_mat);
            save_mat_to_file(getenv("OUTPUT_K"),  "K_%d_%s_%s.bin",  nbf, mol_name, bas_name, K_mat);
            save_mat_to_file(getenv("OUTPUT_XC"), "XC_%d_%s_%s.bin", nbf, mol_name, bas_name, XC_mat);
        }

        // CDIIS acceleration (Pulay mixing)
        st1 = get_wtime_sec();
        TinyDFT_CDIIS(TinyDFT, X_mat, S_mat, D_mat, F_mat);
        et1 = get_wtime_sec();
        printf("* CDIIS procedure       : %.3lf (s)\n", et1 - st1);

        // Diagonalize and build the density matrix
        st1 = get_wtime_sec();
        TinyDFT_build_Dmat_eig(TinyDFT, F_mat, X_mat, D_mat, Cocc_mat);
        et1 = get_wtime_sec();
        printf("* Build density matrix  : %.3lf (s)\n", et1 - st1);

        et0 = get_wtime_sec();
        printf("* Iteration runtime     = %.3lf (s)\n", et0 - st0);
        printf("* Energy = %.10lf", E_curr);
        if (TinyDFT->iter > 0)
        {
            printf(", delta = %e\n", E_delta);
        } else {
            printf("\n");
            E_delta = 19241112.0;  // Prevent the SCF exit after 1st iteration when no SAD initial guess
        }
        TinyDFT->iter++;
        fflush(stdout);
    }
    printf("--------------- SCF iterations finished ---------------\n");
}
void print_usage(const char *argv0)
{
    /* Print the command-line synopsis followed by the option legend. */
    static const char *const legend[] = {
        " * direct/DF J: 0 for direct method, 1 for density fitting\n",
        " * direct/DF/DFT K/XC: 0 for direct method K, 1 for density fitting K, 2 for DFT XC\n",
        " * available XC functions: LDA_X, LDA_C_XA, LDA_C_PZ, LDA_C_PW,\n",
        " GGA_X_PBE, GGA_X_B88, GGA_C_PBE, GGA_C_LYP, \n",
        " HYB_GGA_XC_B3LYP, HYB_GGA_XC_B3LYP5\n",
        " Note: if you use hybrid GGA functionals, enter it twice for both <X-func> and <C-func>.\n",
    };
    printf("Usage: %s <basis> <xyz> <niter> <direct/DF J> <direct/DF/DFT K/XC> <df_basis> <X-func> <C-func>\n", argv0);
    for (size_t idx = 0; idx < sizeof(legend) / sizeof(legend[0]); idx++)
        fputs(legend[idx], stdout);
}
/*
Entry point.  Expected argv layout (see print_usage):
argv[1]=basis set file, argv[2]=xyz geometry, argv[3]=iteration count,
argv[4]=J build mode (0 direct / 1 DF), argv[5]=K build mode
(0 direct / 1 DF / 2 DFT XC); argv[6]=DF auxiliary basis when DF is used;
the X/C functional names follow (argv[7..8] with DF, argv[6..7] without).
*/
int main(int argc, char **argv)
{
if (argc < 6)
{
print_usage(argv[0]);
return 255;
}
double st, et;
int niter, J_op, K_op, use_DF = 0;
niter = atoi(argv[3]);
J_op = atoi(argv[4]);
K_op = atoi(argv[5]);
/* Clamp out-of-range mode selectors to the direct method. */
if (J_op < 0 || J_op > 1) J_op = 0;
if (K_op < 0 || K_op > 2) K_op = 0;
printf("%s will use: ", argv[0]);
if (J_op == 0) printf("direct J, ");
if (J_op == 1) printf("denfit J, ");
if (K_op == 0) printf("direct K\n");
if (K_op == 1) printf("denfit K\n");
if (K_op == 2) printf("DFT XC\n");
// Initialize TinyDFT
TinyDFT_p TinyDFT;
TinyDFT_init(&TinyDFT, argv[1], argv[2]);
// Compute constant matrices and get initial guess for D
st = get_wtime_sec();
TinyDFT_build_Hcore_S_X_mat(TinyDFT, TinyDFT->Hcore_mat, TinyDFT->S_mat, TinyDFT->X_mat);
TinyDFT_build_Dmat_SAD(TinyDFT, TinyDFT->D_mat);
et = get_wtime_sec();
printf("TinyDFT compute Hcore, S, X matrices over, elapsed time = %.3lf (s)\n", et - st);
// Set up density fitting
if (J_op == 1 || K_op == 1)
{
if (argc < 7)
{
printf("You need to provide a density fitting auxiliary basis set!\n");
print_usage(argv[0]);
return 255;
}
use_DF = 1;
// If we don't need DF for K build, reduce memory usage in DF, only DF tensor build
// will become slower; otherwise, use more memory in DF for better K build performance
if (K_op == 1)
{
TinyDFT_setup_DF(TinyDFT, argv[6], argv[2], 0);
} else {
TinyDFT_setup_DF(TinyDFT, argv[6], argv[2], 1);
}
TinyDFT_build_Cocc_from_Dmat(TinyDFT, TinyDFT->D_mat, TinyDFT->Cocc_mat);
}
// Set up XC numerical integral environments
if (K_op == 2)
{
/*
Default functional pair used when no names are given on the command
line.  Note the explicit '\0' in each literal exactly fills the array
(the implicit terminator is dropped, which C permits for char arrays).
*/
char default_xf_str[6] = "LDA_X\0";
char default_cf_str[10] = "LDA_C_PW\0";
char *xf_str = &default_xf_str[0];
char *cf_str = &default_cf_str[0];
/* Functional-name argument positions shift by one when DF is in use. */
if (use_DF == 1 && argc >= 9)
{
xf_str = argv[7];
cf_str = argv[8];
}
if (use_DF == 0 && argc >= 8)
{
xf_str = argv[6];
cf_str = argv[7];
}
st = get_wtime_sec();
TinyDFT_setup_XC_integral(TinyDFT, xf_str, cf_str);
et = get_wtime_sec();
printf("TinyDFT set up XC integral over, elapsed time = %.3lf (s)\n", et - st);
}
// Do SCF calculation
TinyDFT_SCF(TinyDFT, niter, J_op, K_op);
// Free TinyDFT and H2P-ERI
TinyDFT_destroy(&TinyDFT);
return 0;
}
|
zherk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs one of the Hermitian rank k operations
*
* \f[ C = \alpha A \times A^H + \beta C, \f]
* or
* \f[ C = \alpha A^H \times A + \beta C, \f]
*
* where alpha and beta are real scalars, C is an n-by-n Hermitian
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaConjTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaConjTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaConjTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zherk
* @sa plasma_cherk
*
******************************************************************************/
/*
LAPACK-layout driver for the Hermitian rank-k update: validates arguments,
converts A and C to tile layout, runs the asynchronous tile kernel, and
converts C back.  Returns PlasmaSuccess or an error code (negative values
identify the offending argument position).
*/
int plasma_zherk(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, plasma_complex64_t *pA, int lda,
double beta, plasma_complex64_t *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (k < 0) {
plasma_error("illegal value of k");
return -4;
}
// am x an are the stored dimensions of A: n-by-k for NoTrans, k-by-n
// for ConjTrans.
int am, an;
if (trans == PlasmaNoTrans) {
am = n;
an = k;
}
else {
am = k;
an = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldc < imax(1, n)) {
plasma_error("illegal value of ldc");
return -10;
}
// quick return
if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
// NOTE(review): the syrk tuning path is reused for herk here -- confirm
// this is intentional (the operation shapes are identical).
if (plasma->tuning)
plasma_tune_syrk(plasma, PlasmaComplexDouble, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize tile matrix descriptors.
plasma_desc_t A;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, n, 0, 0, n, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
// NOTE(review): retval from plasma_sequence_init/plasma_request_init is
// assigned but never checked -- confirm these cannot fail.
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
// The whole tile pipeline runs inside one parallel region; the master
// thread spawns tasks and the implicit barrier at the region end is the
// synchronization point.
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
plasma_omp_zge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_zherk(uplo, trans,
alpha, A,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs rank k update.
* Non-blocking tile version of plasma_zherk().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zherk
* @sa plasma_omp_zherk
* @sa plasma_omp_cherk
* @sa plasma_omp_dherk
* @sa plasma_omp_sherk
*
******************************************************************************/
/*
Asynchronous tile version of plasma_zherk(): validates the arguments and
dispatches the parallel Hermitian rank-k kernel.  Errors are reported via
plasma_request_fail() on sequence/request rather than a return value.
*/
void plasma_omp_zherk(plasma_enum_t uplo, plasma_enum_t trans,
                      double alpha, plasma_desc_t A,
                      double beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        // Consistency fix: report the error before failing the request,
        // matching every other argument check in this function.
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence is NULL, plasma_request_fail() receives a
    // NULL sequence pointer -- confirm it tolerates that.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return: k is the contraction dimension of A in either layout
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // Call the parallel function.
    plasma_pzherk(uplo, trans,
                  alpha, A,
                  beta,  C,
                  sequence, request);
}
|
GB_unop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint16
// op(A') function: GB_unop_tran__identity_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Casts each of the anz entries of Ax (uint16_t) to int64_t and stores it
// in Cx, parallelized with OpenMP over nthreads threads.  Returns
// GrB_NO_VALUE when the operator is disabled at compile time, GrB_SUCCESS
// otherwise.
GrB_Info GB_unop_apply__identity_int64_uint16
(
int64_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// Each entry is cast independently at the same index, so Cx == Ax
// aliasing is safe.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A (uint16_t) into C (int64_t),
// typecasting each entry. The actual loops live in GB_unop_transpose.c,
// which expands using the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__identity_int64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// phase 2 of the two-phase transpose: Rowcounts already computed by phase 1
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pow_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc64)
// C=scalar+B GB (_bind1st__pow_fc64)
// C=scalar+B' GB (_bind1st_tran__pow_fc64)
// C=A+scalar GB (_bind2nd__pow_fc64)
// C=A'+scalar GB (_bind2nd_tran__pow_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_cpow (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_cpow (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The POW
// operator is applied entrywise via GB_BINOP inside the template.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: C(i,j) = pow (C(i,j), B(i,j))
// for each entry present in B. B has been pre-sliced into B_ntasks tasks.
GrB_Info GB (_Cdense_accumB__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar: C(i,j) = pow (C(i,j), b).
// p_bwork points to the scalar, passed opaquely as GB_void.
// Fix: the generated code contained a second, unreachable
// "return (GrB_SUCCESS)" after the inner block's return; the dead
// statement has been removed. Behavior is unchanged.
GrB_Info GB (_Cdense_accumb__pow_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((node))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the POW operator applied where both
// entries are present. TaskList partitions C into C_ntasks parallel tasks.
GrB_Info GB (_AaddB__pow_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace; allocated inside the template as needed
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult, method 01: C = A.*B or C<M> = A.*B, general sparsity cases.
GrB_Info GB (_AemultB_01__pow_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the kernel loops are expanded from the meta template using the GB_*
// macros defined at the top of this file
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#M> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since pow is not commutative, flipxy selects whether the
// template applies pow(aij,bij) or pow(bij,aij).
GrB_Info GB (_AemultB_02__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 03: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; M has been pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_03__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__pow_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = pow (x, Bx [p]): the scalar x is bound as the FIRST argument.
// Bb is B's bitmap (NULL when B is not bitmap).
GrB_Info GB (_bind1st__pow_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap (presumably GBB treats a
// NULL Bb as "all present" — defined in GB.h)
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = Bx [p] ;
Cx [p] = GB_cpow (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = pow (Ax [p], y): the scalar y is bound as the SECOND argument.
// Ab is A's bitmap (NULL when A is not bitmap).
GrB_Info GB (_bind2nd__pow_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap (presumably GBB treats a
// NULL Ab as "all present" — defined in GB.h)
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = Ax [p] ;
Cx [p] = GB_cpow (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_cpow (x, aij) ; \
}
// C = pow (x, A'): transpose A while applying the operator with scalar x
// bound first; the per-entry work uses GB_CAST_OP as redefined just above.
GrB_Info GB (_bind1st_tran__pow_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code; these are preprocessor
// directives, so their position after the returns is irrelevant
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_cpow (aij, y) ; \
}
// C = pow (A', y): transpose A while applying the operator with scalar y
// bound second; the per-entry work uses GB_CAST_OP as redefined just above.
GrB_Info GB (_bind2nd_tran__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_int16
// op(A') function: GB_unop_tran__identity_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int16_t) Ax: apply the IDENTITY operator with no typecast.
// Ab is A's bitmap when A is bitmap, otherwise NULL (all entries present).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__identity_int16_int16
(
int16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
if (Ab == NULL)
{
// all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity with no typecast: a parallel memcpy does the whole job
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = Ax [k] ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (!Ab [k]) continue ;
Cx [k] = Ax [k] ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose an int16_t matrix (no typecast needed).
// The loops are expanded from GB_unop_transpose.c using the GB_* macros
// defined at the top of this file.
GrB_Info GB_unop_tran__identity_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ten_tusscher_2004_epi_S3_16.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_16.h"
// Report the cell model's constants to the framework: the resting
// potential (INITIAL_V) and the number of ODE state variables (NEQ).
// Each value is filled in only when the corresponding flag is set.
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v) {
cell_model->initial_v = INITIAL_V;
}
if(get_neq) {
cell_model->number_of_ode_equations = NEQ;
}
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize one cell's state vector with Elnaz's precomputed steady-state
// values (rather than the literature defaults, which were:
// V=-86.2, m=0, h=0.75, j=0.75, xr1=0, xr2=1, xs=0, s=1, r=0, d=0,
// f=1, fCa=1, g=1, Cai=0.0002, CaSR=0.2, Nai=11.6, Ki=138.3).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4101823021155,0.00133434531581905,0.775801436097579,0.775630650075024,0.000178798899835041,0.483445172389446,0.00297358826426905,0.999998294908988,1.98511017433873e-08,1.93134593622805e-05,0.999767678471148,1.00747346281009,0.999998771737102,3.62124580505117e-05,0.994388476899862,10.7201427162277,138.622841516637};
// copy the steady state into the cell's state vector
for (uint32_t eq = 0; eq < NEQ; eq++) {
sv[eq] = sv_sst[eq];
}
}
// Advance every requested cell by num_steps ODE steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
// NOTE(review): i is int while num_cells_to_solve's type is declared by
// the macro elsewhere — presumably unsigned; confirm no sign-compare issue
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
// cells_to_solve maps loop index -> cell index; NULL means identity map
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
// take num_steps sequential sub-steps for this cell's NEQ-entry state
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Take one time step of size dt for a single cell's state vector sv.
// RHS_cpu writes the already-updated state values (not derivatives) into
// its output array, so the result is copied straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real state[NEQ], next[NEQ];
for(int k = 0; k < NEQ; k++) {
state[k] = sv[k];
}
RHS_cpu(state, next, stim_current, dt);
for(int k = 0; k < NEQ; k++) {
sv[k] = next[k];
}
}
// One update step of the ten Tusscher 2004 epicardial ventricular cell
// model (17 state variables: V, gates, and ionic concentrations).
// NOTE(review): despite the rDY_ name, this routine writes the *updated*
// state values (Rush-Larsen-style exponential updates for the gates and
// forward Euler for V) rather than time derivatives; the caller copies
// rDY_ straight back into the state vector.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set ("S3_16"): overrides the literature conductances
// above, and supplies arel/crel/Vleak for the SR release/leak fluxes.
real parameters []={14.6308932004730,0.000282361685463076,0.000142426743913113,0.000252033865002963,0.255054406607973,0.152097143073378,0.193466524254039,4.62793530963706,0.0159188322586997,1.02955629608011,1099.63767852806,0.000551702404861208,0.190978122105716,0.0185961496689314,0.00250383087811998,1.53846595460414e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Working variables: membrane currents, fluxes, and gate kinetics
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
// Precomputed factors shared by several expressions below
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// analytic buffering: solve the quadratic for free [Ca] in the SR ...
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// ... and in the cytosol
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen exponential integration: x <- x_inf - (x_inf - x)*exp(-dt/tau)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g may only decrease while the cell is depolarized (V > -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
DRB038-truedepseconddimension-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15
*/
#include <stdlib.h>
// DataRaceBench kernel with an INTENTIONAL data race (a "yes" case): the
// inner j loop carries a true dependence (b[i][j] reads b[i][j-1]) but is
// parallelized anyway. Only the outer i loop would be safe to parallelize.
// Do not "fix" the race — it is the benchmark's seeded defect.
// NOTE(review): b is read before any element is written (b[i][0] is
// uninitialized) — acceptable only because this is a race detector input.
int main(int argc, char* argv[])
{
int i,j;
int len = 1000;
// optional command-line override of the matrix extent
if (argc>1)
len = atoi(argv[1]);
int n=len, m=len;
double b[n][m];
for (i=0;i<n;i++)
#pragma omp parallel for schedule(dynamic)
for (j=1;j<m;j++)
b[i][j]=b[i][j-1];
return 0;
}
|
parallel.h | #ifndef PARALLEL
#define PARALLEL
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int nThreads = -1;
// Crout LU decomposition, A = L*U with unit upper-triangular U.
// For each column j, two omp-for loops run inside one parallel region; the
// implicit barrier between them guarantees column j of L (including the
// pivot L[j][j]) is complete before the row of U that divides by it.
// Fix: the zero-pivot path printed profanity to stdout and called exit(0),
// reporting *success* on a fatal error; it now reports the error on
// stderr and exits with a failure status.
void crout_par1(int n, double **A, double **L, double **U){
// U starts as the identity on its diagonal (Crout convention)
#pragma omp parallel for num_threads(nThreads)
for(int i = 0; i < n; i++){
U[i][i] = 1;
}
for(int j = 0; j < n; j++){
#pragma omp parallel num_threads(nThreads) shared(A, L, U)
{
// column j of L: L[i][j] = A[i][j] - sum_k L[i][k]*U[k][j]
#pragma omp for schedule(static)
for(int i = j; i < n; i++){
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[i][k] * U[k][j];
}
L[i][j] = A[i][j] - sum;
}
// row j of U; starting at i = j recomputes U[j][j] = 1 exactly
// ((A[j][j]-sum)/L[j][j] with the same sum), so it is redundant but
// harmless — this answers the old TODO about starting at i = j+1
#pragma omp for schedule(static)
for(int i = j; i < n; i++){
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[j][k] * U[k][i];
}
if (L[j][j] == 0){
// zero pivot: singular (or needs pivoting, which Crout
// without pivoting cannot provide) — fail loudly
fprintf(stderr, "crout_par1: zero pivot at row %d\n", j);
exit(EXIT_FAILURE);
}
U[j][i] = (A[j][i] - sum) / L[j][j];
}
}
}
}
// One off-diagonal Crout update for pivot column/row j and index i:
//   L[i][j] = A[i][j] - sum_k L[i][k]*U[k][j]
//   U[j][i] = (A[j][i] - sum_k L[j][k]*U[k][i]) / L[j][j]
// Precondition: the pivot L[j][j] has already been computed.
// (n is unused but kept for a uniform task signature.)
// Fix: a zero pivot previously called exit(0), signalling success on a
// fatal error; it now reports the failure and exits with a failure status.
void task_2(int n, double **A, double **L, double **U, int j, int i){
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[i][k] * U[k][j];
}
L[i][j] = A[i][j] - sum;
sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[j][k] * U[k][i];
}
if (L[j][j] == 0){
fprintf(stderr, "task_2: zero pivot at row %d\n", j);
exit(EXIT_FAILURE);
}
U[j][i] = (A[j][i] - sum) / L[j][j];
}
// Crout LU decomposition, A = L*U with unit upper-triangular U.
// For each j the diagonal entry is computed serially, then the remaining
// independent (i > j) updates run in parallel via task_2.
// Fixes: (1) the former hand-rolled 8-way split using "omp sections"
// duplicated the loop body eight times and degenerated to one worker when
// n-1-j < 8 (count == 0 left all work to the last section); an equivalent
// "parallel for" over i = j+1..n-1 computes the same values with proper
// load balance. (2) a zero pivot previously called exit(0) (success
// status); it now reports the error and exits with a failure status.
void crout_par2(int n, double **A, double **L, double **U){
omp_set_num_threads(nThreads);
for(int i = 0; i < n; i++){
U[i][i] = 1;
}
for(int j = 0; j < n; j++){
// diagonal: L[j][j] = A[j][j] - sum_k L[j][k]*U[k][j]
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[j][k] * U[k][j];
}
L[j][j] = A[j][j] - sum;
if (L[j][j] == 0){
fprintf(stderr, "crout_par2: zero pivot at row %d\n", j);
exit(EXIT_FAILURE);
}
// this recomputes U[j][j] = 1 exactly, matching the original code
U[j][j] = (A[j][j] - sum) / L[j][j];
// all remaining entries of column/row j are mutually independent
#pragma omp parallel for schedule(static)
for(int i = j+1; i < n; i++){
task_2(n, A, L, U, j, i);
}
}
}
void Task1(int n, double **A,double **L, double **U, int j){
#pragma omp parallel for schedule(static) num_threads(nThreads/2)
for(int i = j+1; i < n; i++){
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[i][k] * U[k][j];
}
L[i][j] = A[i][j] - sum;
}
}
void Task2(int n, double **A,double **L, double **U, int j){
#pragma omp parallel for schedule(static) num_threads(nThreads/2)
for(int i = j+1; i < n; i++){
double sum = 0;
for(int k = 0; k < j; k++){
sum = sum + L[j][k] * U[k][i];
}
if (L[j][j] == 0){
exit(0);
}
U[j][i] = (A[j][i] - sum) / L[j][j];
}
}
// Update row i for column j: compute L[i][j] and U[j][i] as two independent
// OpenMP sections (meant to run nested inside an outer parallel loop).
// Exits with a failure status on a zero pivot (the original called exit(0),
// reporting success on a fatal error).
// The n parameter is unused but kept for signature compatibility.
void doTask(int n, double **A,double **L, double **U, int j, int i){
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            // L[i][j] = A[i][j] - sum_k L[i][k]*U[k][j]
            double sum = 0;
            for(int k = 0; k < j; k++){
                sum = sum + L[i][k] * U[k][j];
            }
            L[i][j] = A[i][j] - sum;
        }
        #pragma omp section
        {
            // U[j][i] = (A[j][i] - sum_k L[j][k]*U[k][i]) / L[j][j]
            double sum = 0;
            for(int k = 0; k < j; k++){
                sum = sum + L[j][k] * U[k][i];
            }
            if (L[j][j] == 0){
                fprintf(stderr, "doTask: zero pivot at column %d\n", j);
                exit(EXIT_FAILURE);
            }
            U[j][i] = (A[j][i] - sum) / L[j][j];
        }
    }
}
// Crout LU decomposition (variant 3): for each column j, compute the pivot
// L[j][j] sequentially, then run Task1 (rest of column j of L) and Task2
// (rest of row j of U) concurrently as two sections.  Each task is itself a
// parallel loop, so nested parallelism is enabled.
// (A large block of commented-out experimental code — a reduction-based
// pivot plus a doTask-per-row scheme — has been removed.)
// Exits with a failure status on a zero pivot.
void crout_par3(int n, double **A, double **L, double **U){
    #pragma omp parallel for num_threads(nThreads)
    for(int i = 0; i < n; i++){
        U[i][i] = 1;
    }
    // NOTE(review): omp_set_nested is deprecated in OpenMP 5.0 in favor of
    // omp_set_max_active_levels; kept for compatibility with this file.
    omp_set_nested(1);
    for(int j = 0; j < n; j++){
        // Pivot for column j (the i == j case).
        double sum = 0;
        for(int k = 0; k < j; k++){
            sum = sum + L[j][k] * U[k][j];
        }
        L[j][j] = A[j][j] - sum;
        if (L[j][j] == 0){
            // Zero pivot: fail loudly (original: silent exit(0)).
            fprintf(stderr, "crout_par3: zero pivot at column %d\n", j);
            exit(EXIT_FAILURE);
        }
        U[j][j] = (A[j][j] - sum) / L[j][j];   // equals 1 by construction
        // Task1 writes L[*][j], Task2 writes U[j][*]; they only read
        // entries finished in earlier iterations, so they can run
        // concurrently.
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                Task1(n, A, L, U, j);
            }
            #pragma omp section
            {
                Task2(n, A, L, U, j);
            }
        }
    }
}
#endif
|
GB_sparse_add_template.c | //------------------------------------------------------------------------------
// GB_sparse_add_template: C=A+B, C<M>=A+B when C is sparse/hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is sparse or hypersparse:
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse bitmap
// sparse sparse sparse full
// sparse sparse bitmap sparse
// sparse sparse bitmap bitmap
// sparse sparse bitmap full
// sparse sparse full sparse
// sparse sparse full bitmap
// sparse sparse full full
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// If all four matrices are sparse/hypersparse, and C<!M>=A+B is being
// computed, then M is passed in as NULL to GB_add_phase*. GB_add_sparsity
// returns apply_mask as false. The methods below do not handle the case when
// C is sparse, M is sparse, and !M is used. All other uses of !M when M
// is sparse result in a bitmap structure for C, and this is handled by
// GB_bitmap_add_template.
// For this case: the mask is done later, so C=A+B is computed here:
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
{
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j)
// phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < C_ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k ] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (C_to_A == NULL) ? j : C_to_A [k] ;
if (kA >= 0)
{
pA = GBP (Ap, kA, vlen) ;
pA_end = GBP (Ap, kA+1, vlen) ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
int64_t pA_start = pA ;
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1, iA_last = -1 ;
if (ajnz > 0)
{
iA_first = GBI (Ai, pA, vlen) ;
iA_last = GBI (Ai, pA_end-1, vlen) ;
}
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (C_to_B == NULL) ? j : C_to_B [k] ;
if (kB >= 0)
{
pB = GBP (Bp, kB, vlen) ;
pB_end = GBP (Bp, kB+1, vlen) ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
int64_t pB_start = pB ;
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1, iB_last = -1 ;
if (bjnz > 0)
{
iB_first = GBI (Bi, pB, vlen) ;
iB_last = GBI (Bi, pB_end-1, vlen) ;
}
//------------------------------------------------------------------
// get M(:,j) if M is sparse or hypersparse
//------------------------------------------------------------------
bool sparse_mask_is_easy = false ;
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (M_is_sparse_or_hyper)
{
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1],
// which is a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch_is_Mh)
{
// Ch is the same as Mh (a deep copy)
ASSERT (Ch != NULL) ;
ASSERT (M_is_hyper) ;
ASSERT (Ch [k] == M->h [k]) ;
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = GBP (Mp, kM , vlen) ;
pM_end = GBP (Mp, kM+1, vlen) ;
}
}
// The "easy mask" condition requires M to be sparse/hyper
// and structural. A and B cannot be bitmap. Also one of
// the following 3 conditions must hold:
// (1) all entries are present in A(:,j) and B == M
// (2) all entries are present in B(:,j) and A == M
// (3) both A and B are aliased to M
sparse_mask_is_easy =
Mask_struct && // M must be structural
!A_is_bitmap && // A must not be bitmap
!B_is_bitmap && // B must not be bitmap
((adense && B == M) || // one of 3 conditions holds
(bdense && A == M) ||
(A == M && B == M)) ;
// TODO: add the condition above to GB_add_sparsity,
// where adense/bdense are true for the whole matrix
// (adense is true if A is full, or sparse/hypersparse with
// all entries present). The test here is done vector by
// vector, for each A(:,j) and B(:,j). This is a finer grain
// test, as compared to a test for all of A and B.
}
//------------------------------------------------------------------
// C(:,j)<optional mask> = A (:,j) + B (:,j) or subvector
//------------------------------------------------------------------
if (M == NULL)
{
//--------------------------------------------------------------
// M is not present, or !M is sparse but not applied here
//--------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
// If all four matrices are sparse or hypersparse, and
// Mask_comp is true, the mask M is passed in to this method as
// NULL. C=A+B is computed with no mask, and !M is applied
// later.
// A and B are both sparse or hypersparse, not bitmap or
// full, but individual vectors of A and B might have all
// entries present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
#if defined ( GB_PHASE_1_OF_2 )
if (A_and_B_are_disjoint)
{
// only used by GB_wait, which computes A+T where T is the
// matrix of pending tuples for A. The pattern of pending
// tuples is always disjoint with the pattern of A.
cjnz = ajnz + bjnz ;
}
else
#endif
if (adense && bdense)
{
//----------------------------------------------------------
// Method01: A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
#endif
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// Method02: A(:,j) dense, B(:,j) sparse: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Bi [pB + p] ;
int64_t ii = i - iA_first ;
ASSERT (Ai [pA + ii] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + ii, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// Method03: A(:,j) sparse, B(:,j) dense: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
int64_t i = p + iB_first ;
Ci [pC + p] = i ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Ai [pA + p] ;
int64_t ii = i - iB_first ;
ASSERT (Bi [pB + ii] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + ii, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method04: A(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
#endif
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method05: B(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
#endif
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method06: last A(:,j) comes before 1st B(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
pC += ajnz ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
#endif
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method07: last B(:,j) comes before 1st A(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
pC += bjnz ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
#endif
}
#if defined ( GB_PHASE_1_OF_2 )
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// Method08: A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found) cjnz-- ;
}
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// Method09: B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found) cjnz-- ;
}
}
#endif
else
{
//----------------------------------------------------------
// Method10: A(:,j) and B(:,j) about the same sparsity
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, iA, j) ;
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
#endif
pA++ ;
}
else if (iA > iB)
{
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
#endif
pB++ ;
}
else
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
#endif
pA++ ;
pB++ ;
}
#if defined ( GB_PHASE_2_OF_2 )
pC++ ;
#else
cjnz++ ;
#endif
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
ajnz = (pA_end - pA) ;
bjnz = (pB_end - pB) ;
ASSERT (ajnz == 0 || bjnz == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz += ajnz + bjnz ;
#else
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
ASSERT (pC + ajnz + bjnz == pC_end) ;
#endif
}
}
else if (sparse_mask_is_easy)
{
//--------------------------------------------------------------
// special case: M is present and very easy to use
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse full
// sparse sparse full sparse
// sparse sparse full full
// A and B are sparse, hypersparse or full, not bitmap.
ASSERT (!A_is_bitmap) ;
ASSERT (!B_is_bitmap) ;
ASSERT (Mask_struct) ;
int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
#if defined ( GB_PHASE_1_OF_2 )
// M is structural, and sparse or hypersparse, so every entry
// in the mask is guaranteed to appear in A+B. The symbolic
// count is thus trivial.
cjnz = mjnz ;
#else
// copy the pattern into C (:,j)
int64_t pC_start = pC ;
int64_t pM_start = pM ;
memcpy (Ci + pC, Mi + pM, mjnz * sizeof (int64_t)) ;
int64_t pA_offset = pA_start - iA_first ;
int64_t pB_offset = pB_start - iB_first ;
if (adense && B == M)
{
//----------------------------------------------------------
// Method11: A dense, B == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pA_offset + i, vlen) == i) ;
ASSERT (GBI (Bi, pM, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA_offset + i, A_iso) ;
GB_LOAD_B (bij, Bx, pM, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else if (bdense && A == M)
{
//----------------------------------------------------------
// Method12: B dense, A == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pM, vlen) == i) ;
ASSERT (GBI (Bi, pB_offset + i, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pM, A_iso) ;
GB_LOAD_B (bij, Bx, pB_offset + i, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else // (A == M) && (B == M)
{
//----------------------------------------------------------
// Method13: A == M == B: all three matrices the same
//----------------------------------------------------------
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
#if GB_OP_IS_SECOND
GB_LOAD_B (t, Bx, pM, B_iso) ;
#else
GB_LOAD_A (t, Ax, pM, A_iso) ;
#endif
GB_BINOP (GB_CX (pC), t, t, Mi [pM], j) ;
}
#endif
}
#endif
}
else if (M_is_sparse_or_hyper)
{
//--------------------------------------------------------------
// Method14: C and M are sparse or hypersparse
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (*)
// sparse sparse sparse bitmap (*)
// sparse sparse sparse full (*)
// sparse sparse bitmap sparse (*)
// sparse sparse bitmap bitmap (+)
// sparse sparse bitmap full (+)
// sparse sparse full sparse (*)
// sparse sparse full bitmap (+)
// sparse sparse full full (+)
// (*) This method is efficient except when either A or B are
// sparse, and when M is sparse but with many entries. When M
// is sparse and either A or B are sparse, the method is
// designed to be very efficient when M is very sparse compared
// with A and/or B. It traverses all entries in the sparse M,
// and (for sparse A or B) does a binary search for entries in
// A or B. In that case, if M has many entries, the mask M
// should be ignored, and C=A+B should be computed without any
// mask. The test for when to use M here should ignore A or B
// if they are bitmap or full.
// (+) TODO: if C and M are sparse/hyper, and A and B are
// both bitmap/full, then use GB_emult_04_template instead,
// but with (Ab [p] || Bb [p]) instead of (Ab [p] && Bb [p]).
// A and B can have any sparsity pattern (hypersparse,
// sparse, bitmap, or full).
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) + B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij = GB_mcast (Mx, pM, msize) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
bool afound ;
if (adense)
{
// A is dense, bitmap, or full; use quick lookup
pA = pA_start + (i - iA_first) ;
afound = GBB (Ab, pA) ;
}
else if (A == M)
{
// A is aliased to M
pA = pM ;
afound = true ;
}
else
{
// A is sparse; use binary search. This is slow unless
// M is very sparse compared with A.
int64_t apright = pA_end - 1 ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
}
ASSERT (GB_IMPLIES (afound, GBI (Ai, pA, vlen) == i)) ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
bool bfound ;
if (bdense)
{
// B is dense; use quick lookup
pB = pB_start + (i - iB_first) ;
bfound = GBB (Bb, pB) ;
}
else if (B == M)
{
// B is aliased to M
pB = pM ;
bfound = true ;
}
else
{
// B is sparse; use binary search. This is slow unless
// M is very sparse compared with B.
int64_t bpright = pB_end - 1 ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
}
ASSERT (GB_IMPLIES (bfound, GBI (Bi, pB, vlen) == i)) ;
//----------------------------------------------------------
// C(i,j) = A(i,j) + B(i,j)
//----------------------------------------------------------
if (afound && bfound)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
else if (afound)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
else if (bfound)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//--------------------------------------------------------------
// M is bitmap or full, for either C<M>=A+B or C<!M>=A+B
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// This method is very efficient for any mask, and should
// always be used if M is bitmap or full, even if the mask must
// also be applied later in GB_mask or GB_accum_mask.
// Exploiting the mask here adds no extra search time, and it
// reduces the size of C on output.
// GB_GET_MIJ: get M(i,j) where M is bitmap or full
#undef GB_GET_MIJ
#define GB_GET_MIJ(i) \
int64_t pM = pM_start + i ; \
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \
if (Mask_comp) mij = !mij ;
// A and B are sparse or hypersparse, not bitmap or full,
// but individual vectors of A and B might have all entries
// present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
int64_t pM_start = j * vlen ;
if (adense && bdense)
{
//----------------------------------------------------------
// Method15: A(:,j) and B(:,j) dense, M bitmap/full
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
}
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method16: A(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j), or alpha + B(i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method17: B(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method18:last A(:,j) before 1st B(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method19:last B(:,j) before 1st A(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j), or alpha + B(i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else
{
//----------------------------------------------------------
// Method20: merge A(:,j) and B(:,j), M bitmap/full
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
GB_GET_MIJ (iA) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar,
iA, j);
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
pA++ ;
}
else if (iA > iB)
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j), or alpha + B(iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
pB++ ;
}
else
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
pC++ ;
#endif
}
pA++ ;
pB++ ;
}
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t iA = Ai [pA] ;
GB_GET_MIJ (iA) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, iA, j) ;
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t iB = Bi [pB] ;
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j), or alpha + B(iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
pst_fmt_plug.c | /* PST cracker patch for JtR. Hacked together during July of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* Optimizations and shift to pkzip CRC32 code done by JimF
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Uses code from crc32_fmt_plug.c written by JimF */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pst;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pst);
#else
#include <string.h>
#include <inttypes.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 16384 // core i7 no HT
#endif
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PST"
#define FORMAT_NAME "custom CRC-32"
#define FORMAT_TAG "$pst$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 8
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 256
static struct fmt_tests tests[] = {
{"$pst$a9290513", "openwall"}, /* "jfuck jw" works too ;) */
{"$pst$50e099bc", "password"},
{"$pst$00000000", ""},
{"$pst$e3da3318", "xxx"},
{"$pst$a655dd18", "XYz123"},
{"$pst$29b14070", "thisisalongstring"},
{"$pst$25b44615", "string with space"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out);
/*
 * init: allocate the per-run key and result buffers.
 * With OpenMP, min_keys_per_crypt is scaled by the thread count and
 * max_keys_per_crypt by thread-count * OMP_SCALE, so each thread gets a
 * large enough batch of candidates per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* mem_calloc zero-fills, so every saved key starts as an empty string */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated by init() (reverse allocation order not
 * required; the two frees are independent). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/* A valid ciphertext is the "$pst$" tag followed by exactly 8 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	return hexlenl(ciphertext + FORMAT_TAG_LEN, &extra) == BINARY_SIZE * 2 &&
	    !extra;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH characters. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}
/* Return nonzero if any computed CRC in this batch matches the target. */
static int cmp_all(void *binary, int count)
{
	const uint32_t target = *(uint32_t *)binary;
	int index;

	for (index = 0; index < count; index++) {
		if (crypt_out[index] == target)
			return 1;
	}
	return 0;
}
/* Exact comparison of one candidate's CRC against the target binary. */
static int cmp_one(void *binary, int index)
{
	const uint32_t target = *(uint32_t *)binary;

	return crypt_out[index] == target;
}
/* No further verification is possible beyond the full 32-bit CRC already
 * compared in cmp_one(), so always confirm the match. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/*
 * crypt_all: hash every queued key with the PST "custom CRC-32".
 * The CRC register starts at 0 and the final value is apparently not
 * complemented (unlike standard CRC-32) -- NOTE(review): inferred from the
 * zero init here and the "$pst$00000000" == "" test vector; jtr_crc32's
 * exact table/polynomial lives in crc32.c.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		CRC32_t crc = 0;
		unsigned char *p = (unsigned char*)saved_key[i];
		/* feed key bytes up to (not including) the NUL terminator */
		while (*p)
			crc = jtr_crc32(crc, *p++);
		crypt_out[i] = crc;
	}
	return count;
}
static void *get_binary(char *ciphertext)
{
static uint32_t *out;
if (!out)
out = mem_alloc_tiny(sizeof(uint32_t), MEM_ALIGN_WORD);
sscanf(&ciphertext[5], "%x", out);
return out;
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
	char *plaintext = saved_key[index];

	return plaintext;
}
/* Hash-table bucket helpers: low bits of the computed CRC, one function per
 * supported table size (PH_MASK_0 smallest .. PH_MASK_6 largest). */
static int get_hash_0(int index) { return crypt_out[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index] & PH_MASK_6; }
/* Format descriptor: wires the PST CRC-32 routines into the JtR core.
 * FMT_NOT_EXACT: a 32-bit CRC has many preimages, so collisions (multiple
 * "passwords" matching one hash) are expected.  FMT_OMP_BAD: the per-key
 * work is tiny, so OpenMP scaling is poor. */
struct fmt_main fmt_pst = {
	{
		/* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		/* format methods; defaults used where no custom behavior needed */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
AdaptiveRKSolver.h | #include "..//DifferentialSolver.h"
// Runge–Kutta–Fehlberg 45
// Adaptive Runge–Kutta–Fehlberg 4(5) solver.  Runs the six RKF45 stages as
// separate phases, producing a 4th-order estimate (nextCoords1) and a
// 5th-order estimate (nextCoords2); their difference yields the step error
// and a predicted step size.
//
// Fixes vs. previous revision: all pointer members are now nullptr-initialized
// (destroying a solver on which SetSystem() was never called used to delete[]
// indeterminate pointers and dereference an indeterminate `system` -- UB), and
// SetSystem() frees any previously allocated buffers instead of leaking them
// when called more than once.
template<typename Scalar>
class AdaptiveRungeSolver : public DifferentialSolver<Scalar>
{
public:
    using DifferentialSolver<Scalar>::timeStep;
    using DifferentialSolver<Scalar>::currTime;
    using DifferentialSolver<Scalar>::tolerance;
    using DifferentialSolver<Scalar>::stepError;
    using DifferentialSolver<Scalar>::predictedStep;

    // Bind the solver to a system and (re)allocate all work buffers sized for
    // the system's maximum dimension count.  Safe to call repeatedly.
    virtual void SetSystem(DifferentialSystem<Scalar> *system) override
    {
        FreeBuffers(); // no-op on first call: everything starts as nullptr
        this->system = system;
        const int maxDims = system->GetMaxDimentionsCount(); // [sic] project API spelling
        const bool hierarchical = system->GetHierarchyLevelsCount() > 1;
        // initial/old coords are only needed for hierarchical systems
        initialCoords = hierarchical ? new Scalar[maxDims] : nullptr;
        oldCoords     = hierarchical ? new Scalar[maxDims] : nullptr;
        currCoords  = new Scalar[maxDims];
        nextCoords1 = new Scalar[maxDims];
        nextCoords2 = new Scalar[maxDims];
        probeCoords = new Scalar[maxDims];
        k1 = new Scalar[maxDims];
        k2 = new Scalar[maxDims];
        k3 = new Scalar[maxDims];
        k4 = new Scalar[maxDims];
        k5 = new Scalar[maxDims];
        k6 = new Scalar[maxDims];
    }

    ~AdaptiveRungeSolver()
    {
        FreeBuffers();
    }

    // RKF45 is a six-stage scheme.
    virtual int GetPhasesCount() const override
    {
        return 6;
    }

    void InitStep(Scalar timeStep, Scalar tolerance, bool updateInitialCoords) override
    {
        DifferentialSolver<Scalar>::InitStep(timeStep, tolerance, updateInitialCoords);
        if (updateInitialCoords)
        {
            // hierarchical systems snapshot into initialCoords (for RevertStep);
            // flat systems read straight into the working coords
            system->GetCurrCoords(currTime, system->GetHierarchyLevelsCount() > 1 ? initialCoords : currCoords);
        }
    }

    void InitStep(const SolverState& solverState) override
    {
        if (system->GetHierarchyLevelsCount() > 1)
        {
            system->GetCurrCoords(currTime, currCoords, oldCoords, solverState);
        }
    }

    // Execute one RKF45 stage.  Stage k uses the classic Fehlberg tableau
    // coefficients; the final stage forms both embedded estimates and the
    // step-size prediction.
    bool AdvancePhase(const SolverState& solverState) override
    {
        // effective step doubles with each hierarchy level
        Scalar currStep = timeStep * (1 << solverState.hierarchyLevel);
        switch (solverState.phaseIndex)
        {
            case 0:
            {
                system->GetCurrDerivatives(k1, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    probeCoords[coordIndex] = currCoords[coordIndex] + Scalar(0.25) * k1[coordIndex] * currStep;
                }
                system->SetCurrCoords(currTime + currStep * Scalar(0.25), probeCoords, solverState);
            } break;
            case 1:
            {
                system->GetCurrDerivatives(k2, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    probeCoords[coordIndex] = currCoords[coordIndex]
                        + Scalar(3.0 / 32.0) * k1[coordIndex] * currStep
                        + Scalar(9.0 / 32.0) * k2[coordIndex] * currStep;
                }
                system->SetCurrCoords(currTime + currStep * Scalar(3.0 / 8.0), probeCoords, solverState);
            } break;
            case 2:
            {
                system->GetCurrDerivatives(k3, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    probeCoords[coordIndex] = currCoords[coordIndex]
                        + Scalar(1932.0 / 2197.0) * k1[coordIndex] * currStep
                        - Scalar(7200.0 / 2197.0) * k2[coordIndex] * currStep
                        + Scalar(7296.0 / 2197.0) * k3[coordIndex] * currStep;
                }
                system->SetCurrCoords(currTime + currStep * Scalar(12.0 / 13.0), probeCoords, solverState);
            } break;
            case 3:
            {
                system->GetCurrDerivatives(k4, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    probeCoords[coordIndex] = currCoords[coordIndex]
                        + Scalar(439.0 / 216.0) * k1[coordIndex] * currStep
                        - Scalar(8.0) * k2[coordIndex] * currStep
                        + Scalar(3680.0 / 513.0) * k3[coordIndex] * currStep
                        - Scalar(845.0 / 4104.0) * k4[coordIndex] * currStep;
                }
                system->SetCurrCoords(currTime + currStep, probeCoords, solverState);
            } break;
            case 4:
            {
                system->GetCurrDerivatives(k5, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    probeCoords[coordIndex] = currCoords[coordIndex]
                        - Scalar(8.0 / 27.0) * k1[coordIndex] * currStep
                        + Scalar(2.0) * k2[coordIndex] * currStep
                        - Scalar(3544.0 / 2565.0) * k3[coordIndex] * currStep
                        + Scalar(1859.0 / 4104.0) * k4[coordIndex] * currStep
                        - Scalar(11.0 / 40.0) * k5[coordIndex] * currStep;
                }
                system->SetCurrCoords(currTime + currStep * Scalar(0.5), probeCoords, solverState);
            } break;
            case 5:
            {
                system->GetCurrDerivatives(k6, solverState);
                #pragma omp parallel for
                for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
                {
                    // 4th-order estimate (k2 and k6 have zero weight)
                    nextCoords1[coordIndex] = currCoords[coordIndex]
                        + Scalar(25.0 / 216.0) * k1[coordIndex] * currStep
                        + Scalar(1408.0 / 2565.0) * k3[coordIndex] * currStep
                        + Scalar(2197.0 / 4104.0) * k4[coordIndex] * currStep
                        - Scalar(1.0 / 5.0) * k5[coordIndex] * currStep;
                    // 5th-order estimate, used as the accepted solution
                    nextCoords2[coordIndex] = currCoords[coordIndex]
                        + Scalar(16.0 / 135.0) * k1[coordIndex] * currStep
                        + Scalar(6656.0 / 12825.0) * k3[coordIndex] * currStep
                        + Scalar(28561.0 / 56430.0)* k4[coordIndex] * currStep
                        - Scalar(9.0 / 50.0) * k5[coordIndex] * currStep
                        + Scalar(2.0 / 55.0) * k6[coordIndex] * currStep;
                }
                // normalized per-unit-step error; the 1/4 exponent is the
                // standard RKF45 step-size controller
                stepError = (system->GetErrorValue(currTime, nextCoords1, nextCoords2, solverState) / tolerance) / currStep;
                predictedStep = timeStep * Scalar(pow(Scalar(1.0) / stepError, Scalar(1.0 / 4.0)));
            } break;
        }
        return true;
    }

    // Commit the accepted (5th-order) solution back into the system.
    void AdvanceStep(const SolverState& solverState) override
    {
        if (solverState.IsPreInitial())
        {
            currTime += timeStep;
        }
        if (system->GetHierarchyLevelsCount() > 1)
        {
            system->SetCurrCoords(currTime, nextCoords2, oldCoords, solverState);
        } else
        {
            system->SetCurrCoords(currTime, nextCoords2);
        }
    }

    // Roll the system back to the coordinates captured at InitStep time.
    void RevertStep(Scalar currTime) override
    {
        this->currTime = currTime;
        system->SetCurrCoords(currTime, system->GetHierarchyLevelsCount() > 1 ? initialCoords : currCoords);
    }

    Scalar GetLastStepError() const override
    {
        return stepError;
    }

    Scalar GetTimeStepPrediction() const override
    {
        return predictedStep;
    }

private:
    // Release every work buffer; delete[] on nullptr is a no-op, so this is
    // safe whether or not SetSystem() has been called.
    void FreeBuffers()
    {
        delete[] initialCoords; initialCoords = nullptr;
        delete[] oldCoords;     oldCoords = nullptr;
        delete[] currCoords;    currCoords = nullptr;
        delete[] nextCoords1;   nextCoords1 = nullptr;
        delete[] nextCoords2;   nextCoords2 = nullptr;
        delete[] probeCoords;   probeCoords = nullptr;
        delete[] k1; k1 = nullptr;
        delete[] k2; k2 = nullptr;
        delete[] k3; k3 = nullptr;
        delete[] k4; k4 = nullptr;
        delete[] k5; k5 = nullptr;
        delete[] k6; k6 = nullptr;
    }

    Scalar *initialCoords = nullptr; // coords at step start (hierarchical only)
    Scalar *oldCoords = nullptr;     // previous-level coords (hierarchical only)
    Scalar *currCoords = nullptr;    // working coords at step start
    Scalar *nextCoords1 = nullptr;   // 4th-order estimate
    Scalar *nextCoords2 = nullptr;   // 5th-order estimate (accepted)
    Scalar *probeCoords = nullptr;   // intermediate stage coordinates
    Scalar *k1 = nullptr;            // stage derivatives k1..k6
    Scalar *k2 = nullptr;
    Scalar *k3 = nullptr;
    Scalar *k4 = nullptr;
    Scalar *k5 = nullptr;
    Scalar *k6 = nullptr;
    DifferentialSystem<Scalar> *system = nullptr;
};
|
GB_bitmap_masker_template.c | //------------------------------------------------------------------------------
// GB_bitmap_masker_template: phase2 for R = masker (C, M, Z), R is bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Computes C<M>=Z or C<!M>=Z, returning the result in R, which is bitmap.
// The input matrix C is not modified. Effectively, this computes R=C and then
// R<M>=Z or R<!M>=Z. If the C_replace descriptor is enabled, then C has
// already been cleared, and is an empty (but non-NULL) matrix.
// phase2: computes R in a single pass
// C is sparse or hypersparse. Z is bitmap or full. R is bitmap.
// M has any sparsity structure.
// ------------------------------------------
// C <!M> = Z R
// ------------------------------------------
// sparse sparse bitmap bitmap
// sparse sparse full bitmap
// sparse bitmap bitmap bitmap
// sparse bitmap full bitmap
// sparse full bitmap bitmap
// sparse full full bitmap
// ------------------------------------------
// C <M> = Z R
// ------------------------------------------
// sparse bitmap bitmap bitmap
// sparse bitmap full bitmap
// sparse full bitmap bitmap
// sparse full full bitmap
// FUTURE:: add special cases for C==Z, C==M, and Z==M aliases
{

    //--------------------------------------------------------------------------
    // workspace and sanity checks
    //--------------------------------------------------------------------------

    // p: flat index into the R bitmap; rnvals: running count of entries in R
    int64_t p, rnvals = 0 ;

    ASSERT (R_sparsity == GxB_BITMAP) ;
    ASSERT (C_is_sparse || C_is_hyper) ;
    ASSERT (Z_is_bitmap || Z_is_full) ;

    //--------------------------------------------------------------------------
    // scatter C into the R bitmap
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C input to R_bitmap_masker", GB0) ;
    GB_SLICE_MATRIX (C, 8, chunk) ;

    // each task owns a disjoint set of C entries, so only the rnvals counter
    // needs a reduction
    #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) \
        reduction(+:rnvals)
    for (taskid = 0 ; taskid < C_ntasks ; taskid++)
    {
        int64_t kfirst = kfirst_Cslice [taskid] ;
        int64_t klast = klast_Cslice [taskid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // find the part of C(:,k) for this task
            int64_t j = GBH (Ch, k) ;
            int64_t pC_start, pC_end ;
            // GB_get_pA is a generic slicer; here it slices C's kth vector
            GB_get_pA (&pC_start, &pC_end, taskid, k, kfirst,
                klast, pstart_Cslice, Cp, vlen) ;
            // R is bitmap: column j occupies entries [j*vlen, (j+1)*vlen)
            int64_t pR_start = j * vlen ;
            // traverse over C(:,j), the kth vector of C
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                // R(i,j) = C(i,j)
                int64_t i = Ci [pC] ;
                int64_t pR = pR_start + i ;
                Rb [pR] = 1 ;
                rnvals++ ;
                #ifndef GB_ISO_MASKER
                // if C is iso, every entry reads C's single stored value
                memcpy (Rx + (pR)*rsize, Cx + (C_iso? 0:(pC)*rsize), rsize) ;
                #endif
            }
        }
    }
    R->nvals = rnvals ;
    ASSERT_MATRIX_OK (R, "R with C scattered", GB0) ;

    //--------------------------------------------------------------------------
    // R<M>=Z or R<!M>=Z
    //--------------------------------------------------------------------------

    if (M_is_sparse || M_is_hyper)
    {

        //----------------------------------------------------------------------
        // Method05: M is sparse or hypersparse, Z bitmap/full, R bitmap
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C       <!M> =       Z              R
        //      ------------------------------------------
        //      sparse  sparse      bitmap          bitmap
        //      sparse  sparse      full            bitmap

        // a sparse mask is only handled here when complemented
        ASSERT (Mask_comp) ;

        //----------------------------------------------------------------------
        // scatter M into the R bitmap
        //----------------------------------------------------------------------

        GB_SLICE_MATRIX (M, 8, chunk) ;

        #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
        for (taskid = 0 ; taskid < M_ntasks ; taskid++)
        {
            int64_t kfirst = kfirst_Mslice [taskid] ;
            int64_t klast = klast_Mslice [taskid] ;
            for (int64_t k = kfirst ; k <= klast ; k++)
            {
                // find the part of M(:,k) for this task
                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
                    klast, pstart_Mslice, Mp, vlen) ;
                int64_t pR_start = j * vlen ;
                // traverse over M(:,j), the kth vector of M
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    // mark R(i,j) if M(i,j) is true
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t i = Mi [pM] ;
                        // note: this p intentionally shadows the outer p
                        int64_t p = pR_start + i ;
                        // adding 2 stacks the mask bit on top of the presence
                        // bit from C, giving the 4-state encoding decoded below
                        Rb [p] += 2 ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // R<!M>=Z, using M scattered into R
        //----------------------------------------------------------------------

        // Rb is marked as follows:
        //  0:  R(i,j) is not present, and M(i,j) is false
        //  1:  R(i,j) is present, and M(i,j) is false
        //  2:  R(i,j) is not present, and M(i,j) is true
        //  3:  R(i,j) is present, and M(i,j) is true

        // M is complemented, but shown uncomplemented in the table below since
        // that is how it is scattered into R.

        // Rb   R(i,j)  M(i,j)  Z(i,j)  modification to R(i,j)
        // 0    -       0       zij     R(i,j) = Z(i,j), new value, rnvals++
        // 0    -       0       -       do nothing
        // 1    rij     0       zij     R(i,j) = Z(i,j), overwrite
        // 1    rij     0       -       delete R(i,j), rnvals--
        // 2    -       1       zij     do nothing, set Rb to 0
        // 2    -       1       -       do nothing, set Rb to 0
        // 3    rij     1       zij     keep R(i,j), set Rb to 1
        // 3    rij     1       -       keep R(i,j), set Rb to 1

        // rnvals++ and rnvals-- both work under a (+) reduction
        #pragma omp parallel for num_threads(R_nthreads) schedule(static) \
            reduction(+:rnvals)
        for (p = 0 ; p < rnz ; p++)
        {
            int8_t r = Rb [p] ;
            int8_t z = GBB (Zb, p) ;
            switch (r)
            {
                case 0 :    // R(i,j) not present, M(i,j) false
                    if (z)
                    {
                        // R(i,j) = Z(i,j), insert new value
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                        Rb [p] = 1 ;
                        rnvals++ ;
                    }
                    break ;

                case 1 :    // R(i,j) present, M(i,j) false
                    if (z)
                    {
                        // R(i,j) = Z(i,j), update prior value
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                    }
                    else
                    {
                        // delete R(i,j)
                        Rb [p] = 0 ;
                        rnvals-- ;
                    }
                    break ;

                case 2 :    // R(i,j) not present, M(i,j) true
                    Rb [p] = 0 ;
                    break ;

                case 3 :    // R(i,j) present, M(i,j) true
                    Rb [p] = 1 ;
                    break ;

                default: ;
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // Method06: M and Z are bitmap or full, R is bitmap
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C       <!M> =       Z              R
        //      ------------------------------------------
        //      sparse  bitmap      bitmap          bitmap
        //      sparse  bitmap      full            bitmap
        //      sparse  full        bitmap          bitmap
        //      sparse  full        full            bitmap

        //      ------------------------------------------
        //      C       <M> =        Z              R
        //      ------------------------------------------
        //      sparse  bitmap      bitmap          bitmap
        //      sparse  bitmap      full            bitmap
        //      sparse  full        bitmap          bitmap
        //      sparse  full        full            bitmap

        // Rb   R(i,j)  M(i,j)  Z(i,j)  modification to R(i,j)
        // 0    -       0       zij     do nothing
        // 0    -       0       -       do nothing
        // 1    rij     0       zij     do nothing
        // 1    rij     0       -       do nothing
        // 0    -       1       zij     R(i,j) = Z(i,j), rnvals++
        // 0    -       1       -       do nothing
        // 1    rij     1       zij     R(i,j) = Z(i,j), no change to rnvals
        // 1    rij     1       -       delete, rnvals--

        // M can be read directly here (no scatter needed), so the mask test,
        // complement included, is evaluated inline per entry
        #pragma omp parallel for num_threads(R_nthreads) schedule(static) \
            reduction(+:rnvals)
        for (p = 0 ; p < rnz ; p++)
        {
            bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ;
            if (Mask_comp) mij = !mij ;
            if (mij)
            {
                int8_t z = GBB (Zb, p) ;
                int8_t r = Rb [p] ;
                if (r)
                {
                    if (z)
                    {
                        // R(i,j) = Z(i,j), update, no change to rnvals
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                    }
                    else
                    {
                        // delete R(i,j)
                        Rb [p] = 0 ;
                        rnvals-- ;
                    }
                }
                else if (z)
                {
                    // R(i,j) = Z(i,j), new entry
                    #ifndef GB_ISO_MASKER
                    memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize) ;
                    #endif
                    Rb [p] = 1 ;
                    rnvals++ ;
                }
            }
        }
    }

    R->nvals = rnvals ;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
//#define DEBUG
#ifdef DEBUG
#define dprintf(_f, ...) printf((_f), ##__VA_ARGS__)
#else
#define dprintf(_f, ...) {}
#endif
typedef struct {
int w, h;
int *x;
} image_t;
/*
 * Allocate and fill an h*w pixel buffer from stdin.
 * Fixes: the calloc result and every scanf conversion are now checked;
 * previously a short or malformed input silently left garbage (zeros) in
 * the image and an allocation failure crashed on the first write.
 */
void read_image(image_t *im) {
    int h = im->h, w = im->w;
    im->x = calloc((size_t)h * w, sizeof(int));
    if (im->x == NULL) {
        fprintf(stderr, "read_image: out of memory (%d x %d)\n", h, w);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < h * w; i++) {
        if (scanf("%d", im->x + i) != 1) {
            fprintf(stderr, "read_image: malformed input\n");
            exit(EXIT_FAILURE);
        }
    }
}
/* Dump an image row by row; all output goes through dprintf, so this is a
 * no-op unless DEBUG is defined. */
void print_image(image_t *im) {
    int rows = im->h, cols = im->w;
    dprintf("h=%d, w=%d\n", rows, cols);
    for (int r = 0; r < rows; r++) {
        int *line = im->x + r * cols;
        for (int c = 0; c < cols; c++) {
            dprintf("%d ", line[c]);
        }
        dprintf("\n");
    }
}
/* Release an image's pixel buffer and null the pointer so a double call is
 * harmless.  free(NULL) is a no-op, so the old null guard was redundant. */
void free_image(image_t *im) {
    free(im->x);
    im->x = 0;
}
/* Sum of squared pixel differences between `test` and the window of `ref`
 * whose top-left corner is at (oh, ow).  The caller guarantees the window
 * lies fully inside `ref`. */
long long diff_image(image_t *ref, image_t *test, int oh, int ow) {
    const int ref_w = test ? ref->w : 0 * ref->w + ref->w;
    const int th = test->h;
    const int tw = test->w;
    long long total = 0;
    for (int r = 0; r < th; r++) {
        for (int c = 0; c < tw; c++) {
            dprintf("r[%d][%d]=%d, t[%d][%d]=%d\n",
                    r+oh, c+ow, ref->x[(r+oh)*ref_w+(c+ow)], r, c, test->x[r*tw+c]);
            const long long d = ref->x[(r + oh) * ref_w + (c + ow)] - test->x[r * tw + c];
            total += d * d;
        }
    }
    return total;
}
/*
 * Template matching: for each (a, b) pair on stdin, find the offset of b
 * inside a with the smallest sum of squared differences (ties broken by
 * smallest row, then smallest column) and print it 1-based.
 *
 * Fixes: the result was printed with "%d" although the coordinates are
 * long long (undefined behavior per the C printf spec) -- now "%lld";
 * the loop condition checks scanf returned all 4 fields instead of just
 * "not EOF" (a partial read used to spin forever on stale values); x/y
 * are initialized so an empty search space (b larger than a) no longer
 * prints indeterminate values; the unused variable `c` was removed.
 */
int main(void) {
#ifdef DEBUG
    omp_set_num_threads(1);
#endif
    image_t a, b;
    while (scanf("%d %d %d %d", &a.h, &a.w, &b.h, &b.w) == 4) {
        read_image(&a);
        read_image(&b);
        dprintf("[a]\n");
        print_image(&a);
        dprintf("[b]\n");
        print_image(&b);
        long long diff = -1, x = 0, y = 0;
        /* parallelize over candidate row offsets; the best-so-far triple
           (diff, y, x) is only read/written inside the critical section */
        #pragma omp parallel for
        for (int oh = 0; oh <= a.h - b.h; oh++) {
            for (int ow = 0; ow <= a.w - b.w; ow++) {
                long long t = diff_image(&a, &b, oh, ow);
                dprintf("oh=%d, ow=%d, diff=%lld\n", oh, ow, t);
                #pragma omp critical
                {
                    if ((diff < 0) ||
                        (t < diff) ||
                        ((t == diff) && ((oh < y) || ((oh == y) && (ow < x))))
                       ) {
                        dprintf("update!\n");
                        diff = t;
                        x = ow, y = oh;
                    }
                }
            }
        }
        dprintf("[result]\n");
        printf("%lld %lld\n", y + 1, x + 1);
        free_image(&a);
        free_image(&b);
        dprintf("\n");
    }
    return 0;
}
|
LBL.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <immintrin.h>
/*
 * Binary search for the right boundary of key_input in the sorted array
 * row_pointer[0..size-1]: returns the smallest index whose element is
 * strictly greater than key_input (i.e. one past the last element <= key),
 * which is `size` when every element is <= key and 0 when none are.
 *
 * Fix: the midpoint is computed as start + (stop - start) / 2 instead of
 * (stop + start) / 2, which could overflow int for large arrays.
 */
int binary_search_right_boundary_kernel(const int *row_pointer,
                                        const int key_input,
                                        const int size)
{
    int start = 0;
    int stop = size - 1;
    int median;
    int key_median;
    while (stop >= start)
    {
        median = start + (stop - start) / 2;
        key_median = row_pointer[median];
        if (key_input >= key_median)
            start = median + 1;
        else
            stop = median - 1;
    }
    return start;
}
/*
 * Dot_Product_Avx2_dLBL: dot product of one sparse row (len values in Val,
 * column indices in indx) with the dense vector X, written to *res.
 * Uses AVX2 FMA on groups of 4 nonzeros plus a scalar remainder loop.
 *
 * NOTE(review): Val is read from its start, not offset by the row's first
 * nonzero -- callers must pass a value pointer already positioned at the
 * row (or rely on uniform values); confirm against the call site.
 */
void Dot_Product_Avx2_dLBL(int len,
                           const int *indx,
                           const double *Val,
                           const double *X,
                           double *res)
{
    const int *colIndPtr = indx;
    const double *matValPtr = (double *) Val;
    const double *x = (double *) X;
    int j;
    double result = 0.0;
    __m256d vec_y;
    vec_y = _mm256_setzero_pd();
    int nnzThisLine = len;
    int k_iter = nnzThisLine / 4;
    int k_rem = nnzThisLine % 4;

    //Loop in multiples of 4 non-zeroes
    for (j = 0; j < k_iter; j++) {
        // unaligned 4-wide load of values (__m256d_u is the compiler's
        // unaligned vector type, a GCC/Clang extension); the 4 x-values are
        // gathered scalar-wise via _mm256_set_pd (note reversed lane order)
        vec_y = _mm256_fmadd_pd(
                    *((__m256d_u *) (matValPtr)),
                    _mm256_set_pd(x[*(colIndPtr + 3)],
                                  x[*(colIndPtr + 2)],
                                  x[*(colIndPtr + 1)],
                                  x[*(colIndPtr)]),
                    vec_y);
        matValPtr += 4;
        colIndPtr += 4;
    }

    // Horizontal addition
    if (k_iter) {
        // sum[0] += sum[1] ; sum[2] += sum[3]
        vec_y = _mm256_hadd_pd(vec_y, vec_y);
        // Cast avx_sum to 128 bit to obtain sum[0] and sum[1]
        __m128d sum_lo = _mm256_castpd256_pd128(vec_y);
        // Extract 128 bits to obtain sum[2] and sum[3]
        __m128d sum_hi = _mm256_extractf128_pd(vec_y, 1);
        // Add remaining two sums
        __m128d sse_sum = _mm_add_pd(sum_lo, sum_hi);
        // Store result (vector subscripting is a GNU extension)
        result = sse_sum[0];
    }

    //Remainder loop for nnzThisLine%4
    for (j = 0; j < k_rem; j++)
    {
        result += *matValPtr++ * x[*colIndPtr++];
    }
    *(double *) res = result;
}
//int main(int argc, char ** argv)
int spmvLBL(int m,int n,int nnzR,int* RowPtr,int* ColIdx,double*Val,char* filename,double* GFlops_LBL,double* Time_LBL,double* time_pre,double* LBL_error)
{
//char *filename = argv[1];
//printf ("filename = %s\n", filename);
//read matrix
//int m, n, nnzR, isSymmetric;
//mmio_info(&m, &n, &nnzR, &isSymmetric, filename);
//int *RowPtr = (int *)malloc((m+1) * sizeof(int));
//int *ColIdx = (int *)malloc(nnzR * sizeof(int));
//double *Val = (double *)malloc(nnzR * sizeof(double));
//mmio_data(RowPtr, ColIdx, Val, filename);
for (int i = 0; i < nnzR; i++)
Val[i] = 1;
//create X, Y,Y_golden
double *X = (double *)malloc(sizeof(double) * (n+1));
double *Y = (double *)malloc(sizeof(double) * (m+1));
double *Y_golden = (double *)malloc(sizeof(double) * (m+1));
memset (X, 0, sizeof(double) * (n+1));
memset (Y, 0, sizeof(double) * (m+1));
memset (Y_golden, 0, sizeof(double) * (m+1));
for (int i = 0; i < n; i++)
X[i] = 1;
for (int i = 0; i < m; i++)
for(int j = RowPtr[i]; j < RowPtr[i+1]; j++)
Y_golden[i] += Val[j] * X[ColIdx[j]];
//int nthreads = atoi(argv[2]);
//omp_set_num_threads(nthreads);
int nthreads = omp_get_max_threads();
//int iter = atoi(argv[3]);
//printf("#iter is %i \n", iter);
int iter = 500;
struct timeval t1, t2;
gettimeofday(&t1, NULL);
// find balanced points
int *csrSplitter = (int *)malloc((nthreads+1) * sizeof(int));
//int *csrSplitter_normal = (int *)malloc((nthreads+1) * sizeof(int));
int stridennz = ceil((double)nnzR/(double)nthreads);
//#pragma omp parallel for
for (int tid = 0; tid <= nthreads; tid++)
{
// compute partition boundaries by partition of size stride
int boundary = tid * stridennz;
// clamp partition boundaries to [0, nnzR]
boundary = boundary > nnzR ? nnzR : boundary;
// binary search
csrSplitter[tid] = binary_search_right_boundary_kernel(RowPtr, boundary, m + 1) - 1;
}
csrSplitter[0] = 0;
//#pragma omp parallel for
for (int tid = 1; tid <= nthreads; tid++)
{
// compute partition boundaries by partition of size stride
int boundary = tid * stridennz;
// clamp partition boundaries to [0, nnzR]
boundary = boundary > nnzR ? nnzR : boundary;
// binary search
int spl = binary_search_right_boundary_kernel(RowPtr, boundary, m + 1) - 1;
if(spl==csrSplitter[tid-1])
{
spl = m>(spl+1)? (spl+1):m;
csrSplitter[tid] = spl;
}
else
{
csrSplitter[tid] = spl;
}
}
gettimeofday(&t2, NULL);
double time_balanced_pre = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0);
//printf("time_balanced_pre = %f\n", time_balanced_pre);
time_pre[1] = time_balanced_pre;
/*
//-----------------------------------parallel_omp_balanced-------------------------------------
gettimeofday(&t1, NULL);
int currentiter = 0;
for (currentiter = 0; currentiter < iter; currentiter++)
{
#pragma omp parallel for
for (int tid = 0; tid < nthreads; tid++)
{
for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)
{
double sum = 0;
for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
{
sum += Val[j] * X[ColIdx[j]];
}
Y[u] = sum;
}
}
}
gettimeofday(&t2, NULL);
double time_balanced = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
double GFlops_balanced = 2 * nnzR / time_balanced / pow(10,6);
int errorcount_balanced = 0;
for (int i = 0; i < m; i++)
if (Y[i] != Y_golden[i])
errorcount_balanced++;
//printf("time_balanced = %f\n", time_balanced);
//printf("errorcount_balanced = %i\n", errorcount_balanced);
//printf("GFlops_balanced = %f\n", GFlops_balanced);
GFlops_LBL[0] = GFlops_balanced;
Time_LBL[0] = time_balanced;
LBL_error[0] = errorcount_balanced;
//------------------------------------------------------------------------
*/
//------------------------------------parallel_omp_balanced_avx2------------------------------------
int currentiter = 0;
gettimeofday(&t1, NULL);
for (currentiter = 0; currentiter < iter; currentiter++)
{
#pragma omp parallel for
for (int tid = 0; tid < nthreads; tid++)
{
for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)
{
Dot_Product_Avx2_dLBL(RowPtr[u + 1] - RowPtr[u],
ColIdx + RowPtr[u],
Val,
X,
Y + u);
}
}
}
gettimeofday(&t2, NULL);
double time_balanced_avx = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
double GFlops_balanced_avx = 2 * nnzR / time_balanced_avx / pow(10,6);
int errorcount_balanced_avx = 0;
for (int i = 0; i < m; i++)
if (Y[i] != Y_golden[i])
errorcount_balanced_avx++;
//printf("time_balanced_avx = %f\n", time_balanced_avx);
//printf("errorcount_balanced_avx = %i\n", errorcount_balanced_avx);
//printf("GFlops_balanced_avx = %f\n", GFlops_balanced_avx);
GFlops_LBL[1] = GFlops_balanced_avx;
Time_LBL[1] = time_balanced_avx;
LBL_error[1] = errorcount_balanced_avx;
//------------------------------------------------------------------------
return 0;
}
|
noncommutative_broadcast_binary_operation.h | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef __NUMPY_NONCOMMUTATIVE_BROADCAST_BINARY_OPERATION_H__
#define __NUMPY_NONCOMMUTATIVE_BROADCAST_BINARY_OPERATION_H__
#include "point_task.h"
namespace legate {
namespace numpy {
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// One thread per output element: out = in OP scalar, or scalar OP in when
// the scalar is the left operand.  BinaryFunction is noncommutative, so the
// scalar_on_rhs flag determines argument order.  `dense` selects raw-pointer
// indexing for contiguous row-major data versus accessor indexing via an
// unflattened N-D point.
template <int DIM, typename BinaryFunction, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  gpu_noncommutative_broadcast_binary_op(const Args args, const bool dense)
{
  // global linear thread index; the grid is sized to cover args.volume,
  // excess threads exit immediately
  const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= args.volume) return;
  BinaryFunction func;
  if (args.scalar_on_rhs) {
    if (dense) {
      args.outptr[idx] = func(args.inptr[idx], args.scalar);
    } else {
      // map the flat index back to an N-D point inside the rectangle
      const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
      args.out[point] = func(args.in[point], args.scalar);
    }
  } else {
    if (dense) {
      args.outptr[idx] = func(args.scalar, args.inptr[idx]);
    } else {
      const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
      args.out[point] = func(args.scalar, args.in[point]);
    }
  }
}
#endif
// Base class for all Legate's noncommutative binary operation tasks
// Point task computing out = in op scalar (or scalar op in) for a
// noncommutative binary operator, with CPU, OpenMP, and GPU dispatch paths.
// The operand order is chosen at runtime from the serialized `scalar_on_rhs`
// flag, since op(a,b) != op(b,a) in general.
template <class Derived, class BinaryFunction>
class NoncommutativeBroadcastBinaryOperationTask : public PointTask<Derived> {
private:
using first_argument_type = typename BinaryFunction::first_argument_type;
using second_argument_type = typename BinaryFunction::second_argument_type;
// NOTE(review): std::result_of_t is deprecated in C++17 and removed in
// C++20; std::invoke_result_t is the modern equivalent — confirm the
// project's language standard before changing.
using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>;
public:
static_assert(std::is_same<first_argument_type, second_argument_type>::value,
"NoncommutativeBroadcastBinaryOperationTask requires first_argument_type and "
"second_argument_type to be the same type.");
static const int TASK_ID = task_id<BinaryFunction::op_code,
NUMPY_BROADCAST_VARIANT_OFFSET,
result_type,
first_argument_type,
second_argument_type>;
// out_region = in_region1 op scalar
static const int REGIONS = 2;
// Arguments unpacked from the task's serialized buffer and futures.
template <int N>
struct DeserializedArgs {
Legion::Rect<N> rect;
AccessorWO<result_type, N> out;
AccessorRO<first_argument_type, N> in;
Pitches<N - 1> pitches;
size_t volume;
// raw pointers, only valid when deserialize() returned true (dense case)
result_type* outptr;
const first_argument_type* inptr;
second_argument_type scalar;
bool scalar_on_rhs;
// Unpacks all fields; returns true iff both accessors are dense
// row-major over `rect`, enabling the raw-pointer fast path.
bool deserialize(LegateDeserializer& derez,
const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions)
{
rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
out = derez.unpack_accessor_WO<result_type, N>(regions[0], rect);
in = derez.unpack_accessor_RO<first_argument_type, N>(regions[1], rect);
scalar = task->futures[0].get_result<second_argument_type>(true /*silence warnings*/);
scalar_on_rhs = derez.unpack_bool();
volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
// Check to see if this is dense or not
return out.accessor.is_dense_row_major(rect) && in.accessor.is_dense_row_major(rect) &&
(outptr = out.ptr(rect)) && (inptr = in.ptr(rect));
#else
// No dense execution if we're doing bounds checks
return false;
#endif
}
};
// Serial CPU implementation.
template <int DIM>
static void dispatch_cpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (args.scalar_on_rhs) {
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.inptr[idx], args.scalar);
} else {
// wrap the scalar so the generic loop can index it like an array
const Scalar<second_argument_type, DIM> scalar(args.scalar);
CPULoop<DIM>::binary_loop(func, args.out, args.in, scalar, args.rect);
}
} else {
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.scalar, args.inptr[idx]);
} else {
const Scalar<second_argument_type, DIM> scalar(args.scalar);
CPULoop<DIM>::binary_loop(func, args.out, scalar, args.in, args.rect);
}
}
}
#ifdef LEGATE_USE_OPENMP
// OpenMP implementation; identical structure to dispatch_cpu with a
// parallel-for over the dense fast path.
template <int DIM>
static void dispatch_omp(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (args.scalar_on_rhs) {
if (dense) {
#pragma omp parallel for schedule(static)
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.inptr[idx], args.scalar);
} else {
const Scalar<second_argument_type, DIM> scalar(args.scalar);
OMPLoop<DIM>::binary_loop(func, args.out, args.in, scalar, args.rect);
}
} else {
if (dense) {
#pragma omp parallel for schedule(static)
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.scalar, args.inptr[idx]);
} else {
const Scalar<second_argument_type, DIM> scalar(args.scalar);
OMPLoop<DIM>::binary_loop(func, args.out, scalar, args.in, args.rect);
}
}
}
#endif
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// GPU implementation: launches the file-level kernel with one thread per
// element; the dense/sparse choice is resolved inside the kernel.
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
gpu_noncommutative_broadcast_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>>
<<<blocks, THREADS_PER_BLOCK>>>(args, dense);
}
#elif defined(LEGATE_USE_CUDA)
// Declaration only: defined in the .cu translation unit when this header
// is included without nvcc.
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez);
#endif
};
} // namespace numpy
} // namespace legate
#endif // __NUMPY_NONCOMMUTATIVE_BROADCAST_BINARY_OPERATION_H__
|
GB_binop__times_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_01__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_03__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc32)
// A*D function (colscale): GB (_AxD__times_fc32)
// D*A function (rowscale): GB (_DxB__times_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc32)
// C=scalar+B GB (_bind1st__times_fc32)
// C=scalar+B' GB (_bind1st_tran__times_fc32)
// C=A+scalar GB (_bind2nd__times_fc32)
// C=A'+scalar GB (_bind2nd_tran__times_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_mul (aij, bij)
// Type and operator macros consumed by the template files #included by the
// kernels below; the templates expand against these definitions.
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC32_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; body expanded from
// GB_dense_ewise3_accum_template.c using the macros defined above.
void GB (_Cdense_ewise3_accum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; returns GrB_NO_VALUE when this
// operator/type pairing is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// pre-sliced task list in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C, via
// GB_dense_subassign_22_template.c.  Returns GrB_NO_VALUE when this
// operator/type pairing is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__times_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit, matching the other kernels in this file (the generated
// original also returned inside the inner scope, leaving this line dead)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes directly into C's value array
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes directly into C's value array
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (union of patterns), expanded from
// GB_add_template.c.  The GB_WERK workspaces are freed by GB_FREE_WORK
// before returning.
GrB_Info GB (_AaddB__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (intersection of patterns), general
// case, expanded from GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for TIMES (commutative), so only the unflipped branch
// of the #if below is compiled for this operator.
GrB_Info GB (_AemultB_02__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; iterates over M's entries (M_ek_slicing).
GrB_Info GB (_AemultB_03__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__times_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply the operator with the scalar bound as the first
// argument.  Bb is B's bitmap (may be NULL for full matrices); entries not
// present in the bitmap are skipped via GBB.
GrB_Info GB (_bind1st__times_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = Bx [p] ;
Cx [p] = GB_FC32_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply the operator with the scalar bound as the second
// argument.  Ab is A's bitmap (may be NULL); absent entries are skipped.
GrB_Info GB (_bind2nd__times_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = Ax [p] ;
Cx [p] = GB_FC32_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so GB_unop_transpose.c computes cij = op(x, aij);
// no typecasting occurs despite the macro's name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_mul (x, aij) ; \
}
// C = op(x, A'): transpose A and apply the operator with the scalar bound
// as the first argument.
GrB_Info GB (_bind1st_tran__times_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any later template expansion
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so GB_unop_transpose.c computes cij = op(aij, y);
// no typecasting occurs despite the macro's name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_mul (aij, y) ; \
}
// C = op(A', y): transpose A and apply the operator with the scalar bound
// as the second argument.
GrB_Info GB (_bind2nd_tran__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openbsdsoftraid_fmt_plug.c | /*
* Copyright (c) 2014 Thiébaud Weksteen <thiebaud at weksteen dot fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Fixed BE issues, and build problems (Fall 2014), JimF.
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_openbsd_softraid;
#elif FMT_REGISTERS_H
john_register_one(&fmt_openbsd_softraid);
#else
#include "aes.h"
#include "hmac_sha.h"
#include "sha.h"
#include "common.h"
#include "formats.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define OPENBSD_SOFTRAID_SALTLENGTH 128
#define OPENBSD_SOFTRAID_KEYS 32
#define OPENBSD_SOFTRAID_KEYLENGTH 64 /* AES-XTS-256 keys are 512 bits long */
#define OPENBSD_SOFTRAID_MACLENGTH 20
#define BINARY_SIZE OPENBSD_SOFTRAID_MACLENGTH
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
// per-candidate plaintext buffer, allocated in init()
static char (*key_buffer)[PLAINTEXT_LENGTH + 1];
// per-candidate computed HMAC-SHA1 (first BINARY_SIZE bytes compared)
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
unsigned int num_iterations;  // PBKDF2 iteration count from the hash string
unsigned char salt[OPENBSD_SOFTRAID_SALTLENGTH];
unsigned char masked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
} *cur_salt;
// Format init: scale keys-per-crypt by thread count (OpenMP builds) and
// allocate the candidate/result buffers.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
// min scales by raw thread count; max additionally by OMP_SCALE
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
key_buffer = mem_calloc(sizeof(*key_buffer), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
// Release the buffers allocated by init().
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(key_buffer);
}
// Validate a candidate hash string of the form
// $openbsd-softraid$<iterations>$<salt-hex>$<masked-keys-hex>$<hmac-hex>.
// Returns 1 when every field has the expected length and character set.
static int valid(char* ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
if (strncmp(ciphertext, "$openbsd-softraid$", 18) != 0)
return 0;
// work on a copy: strtokm mutates its input
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 18;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (!isdec(p)) /* iterations */
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 128) /* salt */
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 32 * 64) /* masked keys */
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * BINARY_SIZE) /* HMAC-SHA1 */
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
// Install the salt for subsequent crypt_all() calls.
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// Parse the validated hash string into a custom_salt (iterations, raw salt
// bytes, and raw masked key bytes decoded from hex).  Returns a pointer to
// a static buffer, per the JtR salt contract.
static void* get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
ctcopy += 18;
p = strtokm(ctcopy, "$"); /* iterations */
cs.num_iterations = atoi(p);
p = strtokm(NULL, "$"); /* salt */
for (i = 0; i < OPENBSD_SOFTRAID_SALTLENGTH ; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$"); /* masked keys */
for (i = 0; i < OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS; i++)
cs.masked_keys[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
// Decode the final hex field (the stored HMAC-SHA1) into raw bytes.
// Returns a pointer to a static, word-aligned buffer.
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;  // forces alignment for ARCH_WORD_32 access
} buf;
unsigned char *out = buf.c;
char *p;
int i;
// the HMAC is the last '$'-separated field
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
// For each candidate password: derive a 256-bit mask key with
// PBKDF2-HMAC-SHA1, AES-256-ECB-decrypt the masked sector keys, then
// HMAC-SHA1 the unmasked keys under SHA1(mask_key) and store the MAC for
// comparison.  Parallelized over candidates in OpenMP builds.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
AES_KEY akey;
unsigned char mask_key[MAX_KEYS_PER_CRYPT][32];
unsigned char unmasked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
unsigned char hashed_mask_key[20];
int i, j;
/* derive masking key from password */
#ifdef SSE_GROUP_SZ_SHA1
// SIMD path: derive SSE_GROUP_SZ_SHA1 mask keys at once
int lens[SSE_GROUP_SZ_SHA1];
unsigned char *pin[SSE_GROUP_SZ_SHA1], *pout[SSE_GROUP_SZ_SHA1];
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(key_buffer[index+i]);
pin[i] = (unsigned char*)key_buffer[index+i];
pout[i] = mask_key[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens,
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, (unsigned char**)pout,
32, 0);
#else
// scalar path: one mask key per iteration of the outer loop
pbkdf2_sha1((const unsigned char*)(key_buffer[index]),
strlen(key_buffer[index]),
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, mask_key[0],
32, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
/* decrypt sector keys */
AES_set_decrypt_key(mask_key[i], 256, &akey);
// 16-byte AES blocks across the whole masked-key region
for(j = 0; j < (OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS) / 16; j++) {
AES_decrypt(&cur_salt->masked_keys[16*j], &unmasked_keys[16*j], &akey);
}
/* get SHA1 of mask_key */
SHA1(mask_key[i], 32, hashed_mask_key);
hmac_sha1(hashed_mask_key, OPENBSD_SOFTRAID_MACLENGTH,
unmasked_keys, OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS,
(unsigned char*)crypt_out[index+i], 20);
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]));
}
static int cmp_exact(char *source, int index)
{
void *bin = get_binary(source);
return !memcmp(bin, crypt_out[index], 20);
}
static void jtr_set_key(char* key, int index)
{
strcpy(key_buffer[index], key);
}
// Return the stored candidate password for this index.
static char *get_key(int index)
{
return key_buffer[index];
}
/* report iteration count as tunable cost */
/* Tunable-cost hook: expose the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt*)salt;
	return cs->num_iterations;
}
static struct fmt_tests tests_openbsdsoftraid[] = {
// too long of line was causing my Sparc box to fail to compile this code
{"\
$openbsd-softraid$8192$c2891132ca5305d1189a7da94d32de29182abc2f56dc641d685e471935f2646e06b79f1d6c102c2f62f3757a20efb0a110b8ae207f9129f0dc5eea8ab05cc8280e0ba2460faf979dbac9f577c4a083349064364556b7ad15468c17c4d794c3da0ddf5990cc66751a6ded8d534531dd9aa9fce2f43e68d6a7200e135beb55e752$311c42d1d8daf1e47e0150c8d4a455a0567b062970c1838faaedcd3e43795545de64971c7598902a6e2c3fffcf8abe2ef78979164d0c9089fbb931c4c9dac8b86c85eeace11095e38487e41eb7b6094d96c339e86686121fbe1c32dbff3c00706926b22ec3a1329f346c599d132105b5d182a380161504d535f9836bb7286331adce1e47e4e251a0249612a94312bb309a6f4558568467731c1ae8c9b910d27102dca2a72228ffde7bfc60004c8ab33ca2b01aa476c4f42f99a3d1f904e3bbc56270edb314a62e92cf68185ace93731ef4ce08dff3c695c45e35b57ed8ab1552114635eb2ff531437ba5c3a08ebf3e73b6bbb7fe1ad98373da349f09284ae819b6a2f6fc5a10aec347f3c2331abc1d6617e77d68f314fdb683294f3ef351869491c4fb096969924215d711c15e5fce533dc5acaed4a473b14c595bababc178e62ef065770716520ecddc7cbf1cbed1250b7e004ab975bc29780c952087ec382bf6e77447720a10a8c2993262a2b21f8a3f47e35daa5b620573626b474d3e8abf8e73164664b041a18fe35c2a1905fad617bf6e6c380fdeeb680fa89b6c6dc7676ad93fde25076ecb8855d623b45af9a16a62a957d85c4c70896019be1827ad9320a69f18bdfc2674f04babdbfcd679c0ef22f7ab2a18818b9b425e61d8c06196a23babd0aefd5a00f1b297a66d973daae40f4dbd9be60d8953fafbd51f7745e2d04b5c80b63ad1f550cd939490b346d4fe7c1fc266d593bcafac0d8989994e174de6d1ef4ce78b3224ea4e68ccbf998654a067558537be332f5cae4b44c18664428d45b71cde5b53bedddf8a7daf47fce212578b72\
7e420c91de0baa1108683dd5b5534e81f4fe945d27fd9d28934afc8d15d95932952c0be717d4d87bb8255bf658a083c3aed643f7a6cfb56fbcbdab9e0a7348b0a3a91e3d560d1ec96f5769551e64beb54a499f6d6dd37e4361d484fe4f7bac4dc26c8a1a2609592d527b134c8212d71b3578217e0ec1da317c69e7e8c39d2d5b2d4073fa9c618a01a092b61613f6f1e41e6ab43d8ca010f177947aeab2884e9a4dd28453ff5bdadb765680733e7af1463ec1b20b879ae01c9256da0207811f956b3950f6db743a9e34a6d8f0fdfa5c47b4f807f0017c2092d72dc19d111711e796ffc4035da3a4caa6a5301491d0473b0d47cd01b705ff11a10263867013a11c65462c311fa5ac9a2598142779b55f09dbec89ac18049c29e5baf3aa38696a3b92d08b02cb10af5389e06058b3ad8be09b121e4e320520413775b7c6fbb3f2b332e3ac0295a4a4dfb4a56ea1c32bc28c149ffaa3b426f5a17a11afe56426b38966c86734654fe05a611c8f025ee4092656c097bbf59743c31508fa9e80ff86a2ae33d401ec316e65eef251d173e9565ffc1672b8b341174427a851a6a4c42554848c637283d13d4ba5b5414b4e61ade6ec7ef7b77186a81adff381e6a79d3dac2c68bf386f100fef1c354221a2ba3d8a7a10460f637eaa152ab79027ab94e5965660de3ed66dac4a0f8e75b85d768e51c8e82a26cb81249ca8d249d8c5cdc8bd55289679d3915a397d31863334df18e2fe3ef9069b064c4ef6b418e5388817040ae9922e5e9f57a8bf3b3fe04748b9cf5068ac86f942b4068853602a6c6c794423569b665b359d5f947c2e5ff194d23d953b435b2b3834513fdfda2b66fcea22883690b1cc56c2fcaa5600895ff8d8ae9e3a6a2b6258ff873242d1128b20e7d1e843ade1bd206b541eba02a214a95cd83860865f947cb4adbd465957055060df05e53fa9ea4b29867c92b224be939d3715be0e61b7aa0e24a8f25bccfa3b7901a3f0a8cb25498d7c9899d435b409220723dcde1d38ab6d4e7cfb42d443c9b65a37\
53891f46adb9bc52574699a7b642955702ed662d04cbe21aeec7c15db7e325dcaa74c85c5e3ed54424642d5bd8d3109c2d4c0079b3d2c5f2da12ad5b25407ae48f6fe4fc653b23a7f2d56a93c898dd0bd59ba02295934c9f7ffb433ef611d51b7c203f374cf9e8b69d4952ccc44593447ad41540270b0e30c349401048cbce10a0e1bae373de15c878982b0af837fb5432cd2471516d1e218296ce462a59fd5412921bbd3f75cf65070f7bafe21105ba83f7ffe8ece71534863c0dd731a2f3c29fff97b8ce798890a1b158a8891bb6f2dd751e75c0cb0db7ea152d7cdc91663f46f85d12ce0015351dba5225b2a87b64cc30518b23e31b2bfbb0b2a5042eeaea1234a57549a3e55ddd708e3380df032e93071b10b3e6902152c90ffd99bda0177a197779341307c5d9f335e698259ade70564eab9d2856aa1aa814211e71ba2885ef9cd5f5bdd225af2f6eebf775cc0bbdb3e519edb7c49a9a1984cc0cc012679aca8fd1d002fa64b2df095b4a9e2b496e3f4b544955c817efb29562cf8b3d2eeccbe4d364ce71d2d12b504b11de4747139ef505bdd12f382eb02fa3f5272b710644a9c20660ca5b4fa74be60984240b555c1f34261ee1d72d9eb2cc680f32b4603865503addc3a1fdc49d2b158d3407a282edd72ef51ad021338fdebf413726e1778e3bc3909b670d3f40e824391c5525b162ea01c29205e12f8e62bdd8cd0f21f6f7b44af4521c2dd23a7f3508e5dc6fffa3365e4ca1cac33bb515a5c5495dc059a94396de7d802758b65bb4cecb90bf69ab4126eab85958cb8b64eedf3a0955ab42cdc98ef90620e10cc854b9c02bfaff60742494a0c3bb34ef6d6bb861b275d975bdc4a10ac922dc70c1b03a4c01943a704af36ec8d79cf2f9ce0f602f01bef4a32edeb8fbba863c945552efc814410ac6bb839349ea65879644003bdda35d40eabdc9dcfb2d67d945b7f111ab62591763a0dd2d338594eff004237e5acce69dd9d2cdbb9ce121bd$5337e4ba9d877a1e84559688386fbc844c5fe557", "password1" },
{NULL}
};
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
// JtR format descriptor: parameters block followed by the method table
// wiring in the functions defined above (fmt_default_* for the hooks this
// format does not specialize).
struct fmt_main fmt_openbsd_softraid = {
{
"OpenBSD-SoftRAID", // FORMAT_LABEL
"", // FORMAT_NAME
ALGORITHM_NAME,
" (8192 iterations)", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
sizeof(ARCH_WORD_32), //BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
tests_openbsdsoftraid
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
jtr_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval operands.
 * y is normalized in place so that the microsecond difference lands in
 * range; result->tv_usec is then certainly non-negative.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Carry whole seconds out of y's microseconds when x's are smaller. */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_sec += carry;
		y->tv_usec -= 1000000 * carry;
	}
	/* Borrow in the other direction when the gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int borrow = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_sec -= borrow;
		y->tv_usec += 1000000 * borrow;
	}
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	/* After normalization, the sign is carried entirely by the seconds. */
	return x->tv_sec < y->tv_sec;
}
/* 3-D 7-point variable-coefficient stencil benchmark.
 * Usage: prog [nx ny nz [nt]] — interior sizes; a 2-wide halo is added.
 * Fixes vs. original: Nx/Ny/Nz/Nt had no defaults (read uninitialized
 * when arguments were missing — UB); the init loops started at 1 so the
 * halo plane 0 and the whole A[1] buffer were read uninitialized by the
 * stencil (UB); A, coef and tile_size were leaked. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Default problem size (interior 32^3, 10 time steps) used when the
     * corresponding command-line arguments are absent. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;   /* +2: one halo layer on each side */
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* allocate the arrays: A[2][Nz][Ny][Nx] (double buffer in time)
     * and coef[7][Nz][Ny][Nx] (one coefficient per stencil point) */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the list
     * length; the list is modified here before source-to-source
     * transformations */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 32;
    tile_size[3] = 512;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* Initialize ALL cells of both time buffers, including plane/row/
     * column 0: the stencil reads index i-1/j-1/k-1 down to 0, and at
     * t>=1 it also reads the halo of A[1].  The original loops started
     * at 1 and left those cells (and all of A[1]) indeterminate. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays (including the top-level pointers and
     * tile_size, which the original leaked). */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
pooling_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Max pooling, 3x3 kernel, stride 2, computed per channel.
// The hot loop is hand-written NEON assembly (separate A64 and A32
// variants) producing 4 outputs per iteration; a scalar tail loop
// handles the remaining output columns.
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After one output row, r0/r1/r2 have advanced 2*outw floats into
    // their input rows; (w - 2*outw) finishes the row and +w skips one
    // more row, implementing the vertical stride of 2.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // three consecutive input rows feed one output row
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;              // number of 4-wide vector iterations
            int remain = outw - (nn << 2);   // leftover scalar columns
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // ld2 de-interleaves each row into even columns (v0/v2/v4) and
            // odd columns (v1/v3/v5); ext #4 splices in the first lane of
            // the next even vector to form the third column of each window.
            // The trailing subs rewind the pointers by the extra 32 bytes
            // pre-loaded for the final iteration.
            asm volatile(
                "prfm       pldl1keep, [%1, #256]       \n"
                "ld2        {v0.4s, v1.4s}, [%1], #32   \n"
                "prfm       pldl1keep, [%2, #256]       \n"
                "ld2        {v2.4s, v3.4s}, [%2], #32   \n"
                "prfm       pldl1keep, [%3, #256]       \n"
                "ld2        {v4.4s, v5.4s}, [%3], #32   \n"
                "0:                                     \n"
                "prfm       pldl1keep, [%1, #256]       \n"
                "ld2        {v6.4s, v7.4s}, [%1], #32   \n"
                "fmax       v12.4s, v0.4s, v1.4s        \n"
                "fmax       v13.4s, v2.4s, v3.4s        \n"
                "prfm       pldl1keep, [%2, #256]       \n"
                "ld2        {v8.4s, v9.4s}, [%2], #32   \n"
                "fmax       v14.4s, v4.4s, v5.4s        \n"
                "ext        v0.16b, v0.16b, v6.16b, #4  \n"
                "prfm       pldl1keep, [%3, #256]       \n"
                "ld2        {v10.4s, v11.4s}, [%3], #32 \n"
                "ext        v2.16b, v2.16b, v8.16b, #4  \n"
                "fmax       v12.4s, v12.4s, v0.4s       \n"
                "ext        v4.16b, v4.16b, v10.16b, #4 \n"
                "fmax       v13.4s, v13.4s, v2.4s       \n"
                "fmax       v14.4s, v14.4s, v4.4s       \n"
                "fmax       v12.4s, v12.4s, v13.4s      \n"
                "orr        v0.16b, v6.16b, v6.16b      \n"
                "orr        v1.16b, v7.16b, v7.16b      \n"
                "fmax       v12.4s, v12.4s, v14.4s      \n"
                "orr        v2.16b, v8.16b, v8.16b      \n"
                "orr        v3.16b, v9.16b, v9.16b      \n"
                "orr        v4.16b, v10.16b, v10.16b    \n"
                "orr        v5.16b, v11.16b, v11.16b    \n"
                "subs       %w0, %w0, #1                \n"
                "st1        {v12.4s}, [%4], #16         \n"
                "bne        0b                          \n"
                "sub        %1, %1, #32                 \n"
                "sub        %2, %2, #32                 \n"
                "sub        %3, %3, #32                 \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14"
            );
            }
#else
            if (nn > 0)
            {
            // A32 variant of the same scheme using q-registers; vorr
            // copies the freshly loaded vectors into the q0..q5 slots
            // for the next iteration.
            asm volatile(
                "pld        [%1, #256]          \n"
                "vld2.f32   {d0-d3}, [%1]!      \n"// q0 = 0 2 4 6  q1 = 1 3 5 7
                "pld        [%2, #256]          \n"
                "vld2.f32   {d4-d7}, [%2]!      \n"
                "pld        [%3, #256]          \n"
                "vld2.f32   {d8-d11}, [%3]!     \n"
                "0:                             \n"
                "pld        [%1, #256]          \n"
                "vld2.f32   {d12-d15}, [%1]!    \n"// q6 = 8 10 12 14  q7 = 9 11 13 15
                "vmax.f32   q12, q0, q1         \n"
                "vmax.f32   q13, q2, q3         \n"
                "pld        [%2, #256]          \n"
                "vld2.f32   {d16-d19}, [%2]!    \n"
                "vmax.f32   q14, q4, q5         \n"
                "vext.32    q0, q0, q6, #1      \n"
                "pld        [%3, #256]          \n"
                "vld2.f32   {d20-d23}, [%3]!    \n"
                "vext.32    q2, q2, q8, #1      \n"
                "vmax.f32   q12, q12, q0        \n"
                "vext.32    q4, q4, q10, #1     \n"
                "vmax.f32   q13, q13, q2        \n"
                "vmax.f32   q14, q14, q4        \n"
                "vmax.f32   q12, q12, q13       \n"
                "vorr       q0, q6, q6          \n"
                "vorr       q1, q7, q7          \n"
                "vmax.f32   q12, q12, q14       \n"
                "vorr       q2, q8, q8          \n"
                "vorr       q3, q9, q9          \n"
                "vorr       q4, q10, q10        \n"
                "vorr       q5, q11, q11        \n"
                "subs       %0, #1              \n"
                "vst1.f32   {d24-d25}, [%4]!    \n"
                "bne        0b                  \n"
                "sub        %1, #32             \n"
                "sub        %2, #32             \n"
                "sub        %3, #32             \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: plain max over the 3x3 window, horizontal stride 2
            for (; remain>0; remain--)
            {
                float max0 = std::max(std::max(r0[0], r0[1]), r0[2]);
                float max1 = std::max(std::max(r1[0], r1[1]), r1[2]);
                float max2 = std::max(std::max(r2[0], r2[1]), r2[2]);

                *outptr = std::max(std::max(max0, max1), max2);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            // advance to the next triplet of input rows (vertical stride 2)
            r0 += tailstep;//1 + w;
            r1 += tailstep;//1 + w;
            r2 += tailstep;//1 + w;
        }
    }
}
|
analyze.c | /**
* @file analyze.c
* @brief Functions for determining matrix statistics
* @author Dominique LaSalle <lasalle@cs.umn.edu>
* Copyright 2014
* @version 1
* @date 2014-06-28
*/
#ifndef GOOSEBERRY_ANALYZE_C
#define GOOSEBERRY_ANALYZE_C
#include "analyze.h"
#include "cholesky.h"
/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/
/* Compute the peak "active set" size for rows [start, end): the maximum
 * number of columns that are simultaneously live, where a column is live
 * from the first row that references it through the last.  rowval is
 * accepted for signature uniformity but not consulted. */
static dim_t __min_active_set_block(
    const dim_t start,
    const dim_t end,
    const dim_t ncols,
    const ind_t * const rowptr,
    const dim_t * const rowind,
    const real_t * const rowval)
{
  dim_t row, col, active, peak;
  ind_t nz;
  dim_t * first_use;
  dim_t * last_use;

  first_use = dim_init_alloc(NULL_DIM,ncols);
  last_use = dim_init_alloc(NULL_DIM,ncols);

  /* pass 1: record the first and last row touching each column */
  for (row=start;row<end;++row) {
    for (nz=rowptr[row];nz<rowptr[row+1];++nz) {
      col = rowind[nz];
      if (first_use[col] == NULL_DIM) {
        first_use[col] = row;
      }
      last_use[col] = row;
    }
  }

  /* pass 2: sweep the same entries, growing the active set at each
   * column's first use and shrinking it at its last, tracking the peak */
  active = 0;
  peak = 0;
  for (row=start;row<end;++row) {
    for (nz=rowptr[row];nz<rowptr[row+1];++nz) {
      col = rowind[nz];
      if (first_use[col] == row) {
        ++active;
      }
      if (last_use[col] == row) {
        --active;
      }
      if (active > peak) {
        peak = active;
      }
    }
  }

  dl_free(first_use);
  dl_free(last_use);

  return peak;
}
/******************************************************************************
* PUBLIC FUNCTIONS ************************************************************
******************************************************************************/
/**
 * @brief Compute the peak active-set size for each block of rows,
 *        distributing blocks cyclically over OpenMP threads.
 *
 * @param nrows   Total number of rows (used when blocks is NULL).
 * @param ncols   Number of columns.
 * @param rowptr  CSR row pointer (length nrows+1).
 * @param rowind  CSR column indices.
 * @param rowval  CSR values (passed through; not used by the kernel).
 * @param blocks  Row offsets delimiting the blocks, or NULL to treat all
 *                rows as a single block.  Presumably length nblocks+1 —
 *                blocks[c+1] is read for c < nblocks; confirm at callers.
 * @param nblocks Number of blocks.
 * @param mas     Output: mas[c] = peak active-set size of block c.
 *
 * @return GOOSEBERRY_SUCCESS.
 */
int analyze_min_active_set(
    const dim_t nrows,
    const dim_t ncols,
    const ind_t * const rowptr,
    const dim_t * const rowind,
    const real_t * const rowval,
    const dim_t * const blocks,
    const dim_t nblocks,
    dim_t * const mas)
{
  /* default(none) with no shared clause: every outer variable referenced
   * inside is const-qualified, hence predetermined shared. */
  #pragma omp parallel default(none)
  {
    dim_t c,mystart,myend;
    const size_t myid = omp_get_thread_num();
    const size_t nthreads = omp_get_num_threads();

    if (blocks) {
      /* cyclic distribution of blocks over the thread team */
      for (c=myid;c<nblocks;c+=nthreads) {
        mystart = blocks[c];
        myend = blocks[c+1];
        mas[c] = __min_active_set_block(mystart,myend,ncols,rowptr,rowind,
            rowval);
      }
    } else {
      /* NOTE(review): every thread computes and stores the same value to
       * mas[0] — redundant work and formally a data race (benign since
       * all writers store an identical value); confirm intent. */
      mas[0] = __min_active_set_block(0,nrows,ncols,rowptr,rowind,rowval);
    }
  }

  return GOOSEBERRY_SUCCESS;
}
/**
 * @brief Estimate the size and work of a Cholesky factorization.
 *
 * Obtains per-row nonzero counts of the factor via cholesky_rowcounts(),
 * then totals them: nnz is the sum of counts, and the operation estimate
 * is the sum of squared counts.
 *
 * @param nrows  Number of rows in the matrix.
 * @param rowptr CSR row pointer.
 * @param rowind CSR column indices.
 * @param r_nnz  Output: number of nonzeros in the factor.
 * @param r_nops Output: floating-point operation estimate.
 */
void analyze_cholesky(
    dim_t const nrows,
    ind_t const * const rowptr,
    dim_t const * const rowind,
    ind_t * const r_nnz,
    double * const r_nops)
{
  dim_t i;
  double nops;
  ind_t nnz;
  dim_t * counts;

  counts = dim_alloc(nrows);

  nops = 0;
  nnz = 0;

  /* counts[i] = number of nonzeros in row i of the factor */
  cholesky_rowcounts(nrows,rowptr,rowind,counts);

  for (i=0;i<nrows;++i) {
    /* squared row count approximates the flops spent on that row */
    nops += counts[i]*counts[i];
    nnz += counts[i];
  }

  dl_free(counts);

  *r_nnz = nnz;
  *r_nops = nops;
}
#endif
|
DenseAffine.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <random>
#include "bb/DataType.h"
#include "bb/Model.h"
#ifdef BB_WITH_CUDA
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "bbcu/bbcu.h"
#endif
namespace bb {
// Affineレイヤー
template <typename T = float>
class DenseAffine : public Model
{
using _super = Model;
public:
static inline std::string ModelName(void) { return "DenseAffine"; }
static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<T>::Name(); }
std::string GetModelName(void) const override { return ModelName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
bool m_binary_mode = false;
bool m_backward_break = false;
T m_initialize_std = (T)0.01;
std::string m_initializer = "";
std::mt19937_64 m_mt;
index_t m_input_node_size = 0;
indices_t m_input_shape;
index_t m_output_node_size = 0;
indices_t m_output_shape;
std::shared_ptr<Tensor> m_W;
std::shared_ptr<Tensor> m_b;
std::shared_ptr<Tensor> m_dW;
std::shared_ptr<Tensor> m_db;
bool m_cublasEnable = false;
#ifdef BB_WITH_CUDA
cublasHandle_t m_cublasHandle;
#endif
public:
struct create_t
{
indices_t output_shape;
T initialize_std = (T)0.01;
std::string initializer = "";
std::uint64_t seed = 1;
};
protected:
// Construct from a create_t description: allocate the parameter and
// gradient tensors, seed the RNG, and record the output shape.  When
// built with CUDA, attempt to create a cuBLAS handle; m_cublasEnable
// records whether that succeeded.
DenseAffine(create_t const &create)
{
    m_W = std::make_shared<Tensor>();
    m_b = std::make_shared<Tensor>();
    m_dW = std::make_shared<Tensor>();
    m_db = std::make_shared<Tensor>();

#ifdef BB_WITH_CUDA
    if ( cublasCreate(&m_cublasHandle) == CUBLAS_STATUS_SUCCESS ) {
        m_cublasEnable = true;
    }
#endif

//      BB_ASSERT(!create.output_shape.empty());

    m_initialize_std = create.initialize_std;
    m_initializer = create.initializer;
    m_mt.seed(create.seed);

    // the input shape (and hence W/b sizes) is fixed later by SetInputShape()
    m_output_shape = create.output_shape;
    m_output_node_size = CalcShapeSize(m_output_shape);
}
// Handle a runtime command of the form "<key> <bool>".
// Recognized keys: "binary", "host_only", "backward_break".
// Unrecognized or malformed commands are silently ignored (after being
// offered to the base class).
void CommandProc(std::vector<std::string> args)
{
    _super::CommandProc(args);

    // every command handled here is a two-token "<key> <value>" pair
    if (args.size() == 2) {
        if (args[0] == "binary") {
            m_binary_mode = EvalBool(args[1]);
        }
        else if (args[0] == "host_only") {
            m_host_only = EvalBool(args[1]);
        }
        else if (args[0] == "backward_break") {
            m_backward_break = EvalBool(args[1]);
        }
    }
}
public:
~DenseAffine() {
#ifdef BB_WITH_CUDA
if ( m_cublasEnable ) {
BB_CUBLAS_SAFE_CALL(cublasDestroy(m_cublasHandle));
m_cublasEnable = false;
}
#endif
}
static std::shared_ptr<DenseAffine> Create(create_t const &create)
{
return std::shared_ptr<DenseAffine>(new DenseAffine(create));
}
static std::shared_ptr<DenseAffine> Create(indices_t const &output_shape)
{
create_t create;
create.output_shape = output_shape;
return Create(create);
}
static std::shared_ptr<DenseAffine> Create(index_t output_node_size)
{
create_t create;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
return Create(create);
}
static std::shared_ptr<DenseAffine> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11
static std::shared_ptr<DenseAffine> CreatePy(
indices_t output_shape,
T initialize_std = (T)0.01,
std::string initializer = "",
std::uint64_t seed = 1
)
{
create_t create;
create.output_shape = output_shape;
create.initialize_std = initialize_std;
create.initializer = initializer;
create.seed = seed;
return Create(create);
}
#endif
Tensor &W(void) { return *m_W; }
Tensor const &W(void) const { return *m_W; }
Tensor &b(void) { return *m_b; }
Tensor const &b(void) const { return *m_b; }
Tensor &dW(void) { return *m_dW; }
Tensor const &dW(void) const { return *m_dW; }
Tensor &db(void) { return *m_db; }
Tensor const &db(void) const { return *m_db; }
auto lock_W(void) { return m_W->Lock<T>(); }
auto lock_W_const(void) const { return m_W->LockConst<T>(); }
auto lock_b(void) { return m_b->Lock<T>(); }
auto lock_b_const(void) const { return m_b->LockConst<T>(); }
auto lock_dW(void) { return m_dW->Lock<T>(); }
auto lock_dW_const(void) const { return m_dW->LockConst<T>(); }
auto lock_db(void) { return m_db->Lock<T>(); }
auto lock_db_const(void) const { return m_db->LockConst<T>(); }
/**
* @brief 入力のshape設定
* @detail 入力のshape設定
* @param shape 新しいshape
* @return なし
*/
/**
 * @brief  Set the input shape and (re)initialize the parameters.
 * @param  shape  new input shape
 * @return the output shape
 */
indices_t SetInputShape(indices_t shape)
{
    // nothing to do if the shape is already set to the same value
    if ( shape == this->GetInputShape() ) {
        return this->GetOutputShape();
    }

    // record the new shape
    m_input_shape = shape;
    m_input_node_size = CalcShapeSize(shape);

    // (re)allocate parameter and gradient tensors
    m_W->Resize ({m_output_node_size, m_input_node_size}, DataType<T>::type);
    m_b->Resize ({m_output_node_size}, DataType<T>::type);
    m_dW->Resize({m_output_node_size, m_input_node_size}, DataType<T>::type);
    m_db->Resize({m_output_node_size}, DataType<T>::type);

    // weight/bias initialization, selected by m_initializer
    if (m_initializer == "he" || m_initializer == "He") {
        // He: normal with std = sqrt(2 / fan_in)
        m_initialize_std = (T)std::sqrt(2.0 / (double)m_input_node_size);
        m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "xavier" || m_initializer == "Xavier" ) {
        // Xavier: normal with std = sqrt(1 / fan_in)
        m_initialize_std = (T)std::sqrt(1.0 / (double)m_input_node_size);
        m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "normal" || m_initializer == "Normal" ) {
        // plain normal with the user-supplied std
        m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
    }
    else if (m_initializer == "uniform" || m_initializer == "Uniform" ) {
        // uniform on [-k, +k] with k = std * sqrt(3)
        double k = (double)m_initialize_std * std::sqrt(3.0);
        m_W->InitUniformDistribution(-k, +k, m_mt());
        m_b->InitUniformDistribution(-k, +k, m_mt());
    }
    else {
        // default: uniform on [-k, +k] with k = sqrt(1 / fan_in)
        double k = std::sqrt(1.0 / (double)m_input_node_size);
        m_W->InitUniformDistribution(-k, +k, m_mt());
        m_b->InitUniformDistribution(-k, +k, m_mt());
    }

    // gradients start at zero
    m_dW->FillZero();
    m_db->FillZero();

    return m_output_shape;
}
/**
* @brief 出力のshape設定
* @detail 出力のshape設定
* 出力ノード数が変わらない限りshpeは自由
* @param shape 新しいshape
* @return なし
*/
/**
 * @brief  Reshape the output; any shape with the same total node count
 *         is allowed.
 * @param  shape  new output shape
 */
void SetOutputShape(indices_t const &shape)
{
    // A reshape is only legal if it preserves the OUTPUT node count.
    // (The original asserted against m_input_node_size, which rejects
    // every valid reshape whenever input and output sizes differ.)
    BB_ASSERT(CalcShapeSize(shape) == m_output_node_size);
    m_output_shape = shape;
}
/**
* @brief 入力形状取得
* @detail 入力形状を取得する
* @return 入力形状を返す
*/
indices_t GetInputShape(void) const
{
return m_input_shape;
}
/**
* @brief 出力形状取得
* @detail 出力形状を取得する
* @return 出力形状を返す
*/
indices_t GetOutputShape(void) const
{
return m_output_shape;
}
Variables GetParameters(void) override
{
Variables parameters;
if ( !this->m_parameter_lock ) {
parameters.PushBack(m_W);
parameters.PushBack(m_b);
}
return parameters;
}
Variables GetGradients(void) override
{
Variables gradients;
if ( !this->m_parameter_lock ) {
gradients.PushBack(m_dW);
gradients.PushBack(m_db);
}
return gradients;
}
/**
 * @brief  Forward pass: y = W x + b per frame.
 * @param  x_buf  input frame buffer
 * @param  train  when true, the input is saved for the backward pass
 * @return output frame buffer of shape m_output_shape
 */
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
    // save the input for Backward()
    if ( train ) {
        this->PushFrameBuffer(x_buf);
    }

    // convert the input to this layer's data type if necessary
    if ( x_buf.GetType() != DataType<T>::type ) {
        x_buf = x_buf.ConvertTo(DataType<T>::type);
    }

    BB_ASSERT(x_buf.GetType() == DataType<T>::type);
    BB_ASSERT(x_buf.GetNodeSize() == m_input_node_size);

    // set the input shape on first use if SetInputShape was never called
    // NOTE(review): the BB_ASSERT above already requires equality, so this
    // branch is unreachable as written — confirm whether the assert should
    // come after (or be removed for) the lazy-initialization path.
    if (x_buf.GetNodeSize() != m_input_node_size) {
        SetInputShape(x_buf.GetShape());
    }

    // allocate the output buffer
    FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<T>::type);

#ifdef BB_WITH_CUDA
    if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable())
    {
        auto x_ptr = x_buf.LockDeviceMemoryConst();
        auto y_ptr = y_buf.LockDeviceMemory(true);
        auto W_ptr = m_W->LockDeviceMemoryConst();
        auto b_ptr = m_b->LockDeviceMemoryConst();

        // broadcast the bias into every frame of y
        bbcu_fp32_MatrixRowwiseSetVector
            (
                (float const *)b_ptr.GetAddr(),
                (float       *)y_ptr.GetAddr(),
                (int          )y_buf.GetNodeSize(),
                (int          )y_buf.GetFrameSize(),
                (int          )(y_buf.GetFrameStride() / sizeof(float))
            );

        // y += W * x  (beta = 1 accumulates onto the bias just written)
        float alpha = 1.0f;
        float beta  = 1.0f;
        BB_CUBLAS_SAFE_CALL(cublasSgemm
            (
                m_cublasHandle,
                CUBLAS_OP_N,
                CUBLAS_OP_N,
                (int)y_buf.GetFrameSize(),
                (int)y_buf.GetNodeSize(),
                (int)x_buf.GetNodeSize(),
                &alpha,
                (const float *)x_ptr.GetAddr(),
                (int)(x_buf.GetFrameStride() / sizeof(float)),
                (const float *)W_ptr.GetAddr(),
                (int)x_buf.GetNodeSize(),
                &beta,
                (float *)y_ptr.GetAddr(),
                (int)(y_buf.GetFrameStride() / sizeof(float))
            ));

        return y_buf;
    }
#endif

    {
        // CPU fallback: straightforward per-frame matrix-vector product.
        // Parallelized over frames; each frame writes disjoint outputs.
        auto frame_size = x_buf.GetFrameSize();
        auto x_ptr = x_buf.LockConst<T>();
        auto y_ptr = y_buf.Lock<T>();
        auto W_ptr = lock_W_const();
        auto b_ptr = lock_b_const();

        #pragma omp parallel for
        for (index_t frame = 0; frame < frame_size; ++frame) {
            for (index_t output_node = 0; output_node < m_output_node_size; ++output_node) {
                // start from the bias, then accumulate the dot product
                y_ptr.Set(frame, output_node, b_ptr(output_node));
                for (index_t input_node = 0; input_node < m_input_node_size; ++input_node) {
                    y_ptr.Add(frame, output_node, x_ptr.Get(frame, input_node) * W_ptr(output_node, input_node));
                }
            }
        }

        return y_buf;
    }
}
/**
 * @brief  Backward pass: accumulate dW/db from dy and the saved input,
 *         and return dx = dy * W.
 * @param  dy_buf  gradient w.r.t. this layer's output
 * @return gradient w.r.t. this layer's input
 */
FrameBuffer Backward(FrameBuffer dy_buf) override
{
    if ( dy_buf.Empty() || m_backward_break ) {
        // NOTE(review): this nulls the gradient shared_ptrs rather than
        // zeroing their contents.  Variables previously handed out by
        // GetGradients() still reference the old tensors, and any later
        // lock_dW()/lock_db() would dereference a null pointer — confirm
        // this is intended.
        m_dW = 0;
        m_db = 0;
        return FrameBuffer();
    }

    BB_ASSERT(dy_buf.GetType() == DataType<T>::type);

    // number of frames in the mini-batch
    auto frame_size = dy_buf.GetFrameSize();

    // restore the input saved during Forward()
    FrameBuffer x_buf = PopFrameBuffer();
    BB_ASSERT(x_buf.GetFrameSize() == dy_buf.GetFrameSize());

    // convert the saved input to this layer's data type if necessary
    if ( x_buf.GetType() != DataType<T>::type ) {
        x_buf = x_buf.ConvertTo(DataType<T>::type);
    }

    FrameBuffer dx_buf(x_buf.GetFrameSize(), x_buf.GetShape(), DataType<T>::type);

#ifdef BB_WITH_CUDA
    if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable())
    {
        auto dy_ptr = dy_buf.LockDeviceMemoryConst();
        auto x_ptr  = x_buf.LockDeviceMemoryConst();
        auto dx_ptr = dx_buf.LockDeviceMemory(true);
        auto W_ptr  = m_W->LockDeviceMemoryConst();
        auto b_ptr  = m_b->LockDeviceMemoryConst();
        auto dW_ptr = m_dW->LockDeviceMemory();
        auto db_ptr = m_db->LockDeviceMemory();

        // db = column-wise sum of dy over frames
        bbcu_fp32_MatrixColwiseSum
            (
                (float const *)dy_ptr.GetAddr(),
                (float       *)db_ptr.GetAddr(),
                (int          )dy_buf.GetNodeSize(),
                (int          )dy_buf.GetFrameSize(),
                (int          )(dy_buf.GetFrameStride() / sizeof(float))
            );

        // dx = dy * W^T  (beta = 0 overwrites dx)
        float alpha = 1.0f;
        float beta  = 0.0f;
        BB_CUBLAS_SAFE_CALL(cublasSgemm
            (
                m_cublasHandle,
                CUBLAS_OP_N,
                CUBLAS_OP_T,
                (int)dx_buf.GetFrameSize(),
                (int)dx_buf.GetNodeSize(),
                (int)dy_buf.GetNodeSize(),
                &alpha,
                (const float *)dy_ptr.GetAddr(),
                (int)(dy_buf.GetFrameStride() / sizeof(float)),
                (const float *)W_ptr.GetAddr(),
                (int)dx_buf.GetNodeSize(),
                &beta,
                (float *)dx_ptr.GetAddr(),
                (int)(dx_buf.GetFrameStride() / sizeof(float))
            ));

        // dW += x^T * dy  (beta = 1 accumulates into the existing gradient)
        beta = 1.0f;
        BB_CUBLAS_SAFE_CALL(cublasSgemm
            (
                m_cublasHandle,
                CUBLAS_OP_T,
                CUBLAS_OP_N,
                (int)dx_buf.GetNodeSize(),
                (int)dy_buf.GetNodeSize(),
                (int)dx_buf.GetFrameSize(),
                &alpha,
                (const float *)x_ptr.GetAddr(),
                (int)(x_buf.GetFrameStride() / sizeof(float)),
                (const float *)dy_ptr.GetAddr(),
                (int)(dy_buf.GetFrameStride() / sizeof(float)),
                &beta,
                (float *)dW_ptr.GetAddr(),
                (int)dx_buf.GetNodeSize()
            ));

        return dx_buf;
    }
#endif

    {
        // CPU fallback
        dx_buf.FillZero();

        auto x_ptr  = x_buf.LockConst<T>();
        auto dy_ptr = dy_buf.LockConst<T>();
        auto dx_ptr = dx_buf.Lock<T>();
        auto W_ptr  = lock_W_const();
        auto b_ptr  = lock_b_const();
        auto dW_ptr = lock_dW();
        auto db_ptr = lock_db();

        // NOTE(review): the loop is parallelized over frames, but the
        // "db_ptr(...) +=" and "dW_ptr(...) +=" accumulations hit the
        // same locations from every frame — a data race under OpenMP.
        // (The dx writes are per-frame and safe.)  Consider a reduction
        // or restructuring the dW/db accumulation.
        #pragma omp parallel for
        for (index_t frame = 0; frame < frame_size; ++frame) {
            for (index_t output_node = 0; output_node < m_output_node_size; ++output_node) {
                auto grad = dy_ptr.Get(frame, output_node);
                db_ptr(output_node) += grad;
                for (index_t input_node = 0; input_node < m_input_node_size; ++input_node) {
                    dx_ptr.Add(frame, input_node, grad * W_ptr(output_node, input_node));
                    dW_ptr(output_node, input_node) += grad * x_ptr.Get(frame, input_node);
                }
            }
        }

        return dx_buf;
    }
}
// シリアライズ
protected:
void DumpObjectData(std::ostream &os) const override
{
// バージョン
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// 親クラス
_super::DumpObjectData(os);
// メンバ
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_binary_mode);
bb::SaveValue(os, m_initialize_std);
bb::SaveValue(os, m_initializer);
bb::SaveValue(os, m_input_shape);
bb::SaveValue(os, m_output_shape);
bb::SaveValue(os, m_cublasEnable);
m_W->DumpObject(os);
m_b->DumpObject(os);
}
void LoadObjectData(std::istream &is) override
{
#ifdef BB_WITH_CUDA
if ( m_cublasEnable ) {
BB_CUBLAS_SAFE_CALL(cublasDestroy(m_cublasHandle));
m_cublasEnable = false;
}
#endif
// バージョン
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// 親クラス
_super::LoadObjectData(is);
// メンバ
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_binary_mode);
bb::LoadValue(is, m_initialize_std);
bb::LoadValue(is, m_initializer);
bb::LoadValue(is, m_input_shape);
bb::LoadValue(is, m_output_shape);
bb::LoadValue(is, m_cublasEnable);
m_W->LoadObject(is);
m_b->LoadObject(is);
// 再構築
#ifdef BB_WITH_CUDA
if ( m_cublasEnable ) {
if ( cublasCreate(&m_cublasHandle) != CUBLAS_STATUS_SUCCESS ) {
m_cublasEnable = false;
}
}
#endif
m_input_node_size = CalcShapeSize(m_input_shape);
m_output_node_size = CalcShapeSize(m_output_shape);
m_dW->Resize({m_output_node_size, m_input_node_size}, DataType<T>::type); m_dW->FillZero();
m_db->Resize({m_output_node_size}, DataType<T>::type); m_db->FillZero();
}
public:
// Serialize(旧)
void Save(std::ostream &os) const
{
// SaveValue(os, m_binary_mode);
os.write((const char*)&m_binary_mode, sizeof(m_binary_mode)); // バグに対する後方互換性
SaveIndices(os, m_input_shape);
SaveIndices(os, m_output_shape);
m_W->Save(os);
m_b->Save(os);
}
void Load(std::istream &is)
{
// bb::LoadValue(is, m_binary_mode);
is.read((char*)&m_binary_mode, sizeof(m_binary_mode)); // バグに対する後方互換性
m_input_shape = bb::LoadIndices(is);
m_output_shape = bb::LoadIndices(is);
m_W->Load(is);
m_b->Load(is);
}
#ifdef BB_WITH_CEREAL
template <class Archive>
void save(Archive& archive, std::uint32_t const version) const
{
_super::save(archive, version);
archive(cereal::make_nvp("binary_mode", m_binary_mode));
archive(cereal::make_nvp("input_shape", m_input_shape));
archive(cereal::make_nvp("output_shape", m_output_shape));
archive(cereal::make_nvp("W", *m_W));
archive(cereal::make_nvp("b", *m_b));
}
template <class Archive>
void load(Archive& archive, std::uint32_t const version)
{
_super::load(archive, version);
archive(cereal::make_nvp("binary_mode", m_binary_mode));
archive(cereal::make_nvp("input_shape", m_input_shape));
archive(cereal::make_nvp("output_shape", m_output_shape));
m_input_node_size = CalcShapeSize(m_input_shape);
m_output_node_size = CalcShapeSize(m_output_shape);
archive(cereal::make_nvp("W", *m_W));
archive(cereal::make_nvp("b", *m_b));
}
void Save(cereal::JSONOutputArchive& archive) const
{
archive(cereal::make_nvp("DenseAffine", *this));
}
void Load(cereal::JSONInputArchive& archive)
{
archive(cereal::make_nvp("DenseAffine", *this));
}
#endif
};
} |
GB_unaryop__ainv_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_int64
// op(A') function: GB_tran__ainv_fp64_int64
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV operator elementwise: Cx [p] = -((double) Ax [p]) for
// all anz entries, split statically over nthreads OpenMP threads.
GrB_Info GB_unop__ainv_fp64_int64
(
    double *Cx,         // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent: cast int64_t -> double, then negate
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(double) A': transpose A, typecast, and apply AINV.  The actual
// work is done by the GB_unaryop_transpose.c template (phase 2 of 2),
// which expands using the GB_* macros defined above.
GrB_Info GB_tran__ainv_fp64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts (presumably naslice entries — see template)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.