pr46032-2.c | /* { dg-do compile } */
/* { dg-options "-O2 -fopenmp -std=c99 -fipa-pta -fdump-tree-optimized" } */
#define N 2
int
foo (void)
{
int a[N], b[N], c[N];
int *ap = &a[0];
int *bp = &b[0];
int *cp = &c[0];
#pragma omp parallel for
for (unsigned int idx = 0; idx < N; idx++)
{
ap[idx] = 1;
bp[idx] = 2;
cp[idx] = ap[idx];
}
return *cp;
}
/* { dg-final { scan-tree-dump-times "\\] = 1;" 2 "optimized" } } */
/* { dg-final { scan-tree-dump-times "\\] = 2;" 1 "optimized" } } */
/* { dg-final { scan-tree-dump-times "\\] = _\[0-9\]*;" 0 "optimized" } } */
/* { dg-final { scan-tree-dump-times "\\] = " 3 "optimized" } } */
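/* A reading of the dg-final directives above: -fipa-pta must propagate
   points-to information into the outlined OpenMP loop body, so that ap, bp,
   and cp are known not to alias.  The load in cp[idx] = ap[idx] can then be
   replaced by the constant 1, leaving exactly two "= 1" stores, one "= 2"
   store, three indexed stores in total, and no store of an SSA temporary.  */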
|
LG_CC_FastSV5_64_Nov23_2021.c | //------------------------------------------------------------------------------
// LG_CC_FastSV5_64: connected components (64-bit version)
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//------------------------------------------------------------------------------
// Code is based on the algorithm described in the following paper
// Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
// Algorithm with Fast Convergence (SIAM PP20)
// A subsequent update to the algorithm is here (which might not be reflected
// in this code):
//
// Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding
// connected components using linear algebra. J. Parallel Distributed Comput.
// 144: 14-27 (2020).
// Modified by Tim Davis, Texas A&M University
// The input matrix A must be symmetric. Self-edges (diagonal entries) are
// OK, and are ignored. The values and type of A are ignored; just its
// structure is accessed.
// todo: this function is not thread-safe, since it exports G->A and then
// reimports it back. G->A is unchanged when the function returns, but during
// execution G->A is invalid.
#define LAGraph_FREE_ALL ;
#include "LG_internal.h"
#if !LG_VANILLA
#if (! LG_SUITESPARSE )
#error "SuiteSparse:GraphBLAS v6.0.0 or later required"
#endif
//------------------------------------------------------------------------------
// hash functions: todo describe me
//------------------------------------------------------------------------------
// hash table size must be a power of 2
#define HASH_SIZE 1024
// number of samples to insert into the hash table
// todo: this seems to be a lot of entries for a HASH_SIZE of 1024.
// There could be lots of collisions.
#define HASH_SAMPLES 864
#define HASH(x) (((x << 4) + x) & (HASH_SIZE-1))
#define NEXT(x) ((x + 23) & (HASH_SIZE-1))
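// HASH(x) computes (17*x) mod HASH_SIZE, and NEXT(h) probes with a fixed
// stride of 23.  Since gcd (23, HASH_SIZE) = 1, the probe sequence visits
// every slot, so the linear-probe loops below always terminate as long as
// the table has a free slot.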
//------------------------------------------------------------------------------
// ht_init: todo describe me
//------------------------------------------------------------------------------
// Clear the hash table counts (ht_val [0:HASH_SIZE-1] = 0), and set all hash
// table entries as empty (ht_key [0:HASH_SIZE-1] = -1).
// todo: the memset of ht_key is confusing
// todo: the name "ht_val" is confusing. It is not a value, but a count of
// the number of times the value x = ht_key [h] has been inserted into the
// hth position in the hash table. It should be renamed ht_cnt.
static inline void ht_init
(
int64_t *ht_key,
int64_t *ht_val
)
{
memset (ht_key, -1, sizeof (int64_t) * HASH_SIZE) ;
memset (ht_val, 0, sizeof (int64_t) * HASH_SIZE) ;
}
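// A possible refactor for the "make this loop a static inline function"
// todos below (a sketch only; the probe loops in ht_sample and the
// alternate Reduce_assign are left unchanged here): find the slot holding
// key x, or the first empty slot where x can be inserted.
static inline int64_t ht_lookup // return slot of x, or first empty slot
(
int64_t *ht_key, // hash table keys, of size HASH_SIZE
int64_t x // key to search for
)
{
int64_t h = HASH (x) ;
while (ht_key [h] != -1 && ht_key [h] != x)
{
h = NEXT (h) ;
}
return (h) ;
}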
//------------------------------------------------------------------------------
// ht_sample: todo describe me
//------------------------------------------------------------------------------
//
static inline void ht_sample
(
uint64_t *V, // array of size n (todo: this is a bad variable name)
int64_t n,
int64_t samples, // number of samples to take from V
int64_t *ht_key,
int64_t *ht_val,
uint64_t *seed
)
{
for (int64_t k = 0 ; k < samples ; k++)
{
// select an entry from V at random
int64_t x = V [LAGraph_Random60 (seed) % n] ;
// find x in the hash table
// todo: make this loop a static inline function (see also below)
int64_t h = HASH (x) ;
while (ht_key [h] != -1 && ht_key [h] != x)
{
h = NEXT (h) ;
}
ht_key [h] = x ;
ht_val [h]++ ;
}
}
//------------------------------------------------------------------------------
// ht_most_frequent: todo describe me
//------------------------------------------------------------------------------
// todo what if key is returned as -1? Code breaks. todo: handle this case
static inline int64_t ht_most_frequent
(
int64_t *ht_key,
int64_t *ht_val
)
{
int64_t key = -1 ;
int64_t val = 0 ; // max (ht_val [0:HASH_SIZE-1])
for (int64_t h = 0 ; h < HASH_SIZE ; h++)
{
if (ht_val [h] > val)
{
key = ht_key [h] ;
val = ht_val [h] ;
}
}
return (key) ; // return most frequent key
}
//------------------------------------------------------------------------------
// Reduce_assign: w (index) += s, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------
// The index array, of size n, can have duplicates. The vectors w and s are
// full (all entries present). This function computes:
//
// for (j = 0 ; j < n ; j++)
// {
// uint64_t i = index [j] ;
// w [i] = min (w [i], s [j]) ;
// }
//
// If C(i,j) = true where i == index [j], then this can be written with the
// min_second semiring:
//
// w = min (w, C*s)
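//
// A small worked example (for illustration only): with n = 3,
// index = {0, 0, 1}, s = {5, 3, 7}, and w = {9, 2, 8}, the loop gives
// w = {min(9,5,3), min(2,7), 8} = {3, 2, 8}. The corresponding C has
// C(0,0), C(0,1), and C(1,2) true, and w = min (w, C*s) with the
// min_second semiring produces the identical result.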
#if 1
static inline int Reduce_assign
(
GrB_Vector w, // vector of size n, all entries present
GrB_Vector s, // vector of size n, all entries present
GrB_Matrix C, // boolean matrix of size n-by-n
GrB_Index **Cp_handle, // array of size n+1, equal to 0:n
GrB_Index **Ci_handle, // index array of size n, can have duplicates
bool **Cx_handle, // array of size 1, equal to true
char *msg
)
{
// size of Cp, Ci, and Cx in bytes
GrB_Index n ;
GrB_TRY (GrB_Vector_size (&n, w)) ;
GrB_Index Cp_size = (n+1) * sizeof (GrB_Index) ;
GrB_Index Ci_size = n * sizeof (GrB_Index) ;
GrB_Index Cx_size = sizeof (bool) ;
// pack Cp, Ci, and Cx into a matrix C with C(i,j) = true if Ci(j) == i
bool iso = true ;
bool jumbled = false ;
GrB_TRY (GxB_Matrix_pack_CSC (C, Cp_handle, Ci_handle, (void **) Cx_handle,
Cp_size, Ci_size, Cx_size, iso, jumbled, NULL)) ;
// w = min (w, C*s) using the MIN_SECOND semiring
GrB_TRY (GrB_mxv (w, NULL, GrB_MIN_UINT64,
GrB_MIN_SECOND_SEMIRING_UINT64, C, s, NULL)) ;
// unpack the contents of C
GrB_TRY (GxB_Matrix_unpack_CSC (C, Cp_handle, Ci_handle, (void **)Cx_handle,
&Cp_size, &Ci_size, &Cx_size, &iso, &jumbled, NULL)) ;
return (GrB_SUCCESS) ; // yay! It works!
}
#else
static inline int Reduce_assign
(
GrB_Vector *w_handle, // vector of size n, all entries present
GrB_Vector *s_handle, // vector of size n, all entries present
GrB_Index *index, // index array of size n, can have duplicates
GrB_Index n,
int nthreads,
int64_t *ht_key, // hash table
int64_t *ht_val, // hash table (count of # of entries)
uint64_t *seed, // random
char *msg
)
{
GrB_Type w_type, s_type ;
GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i, w_size, s_size ;
uint64_t *w_x, *s_x ;
bool s_iso = false ;
//--------------------------------------------------------------------------
// export w and s
//--------------------------------------------------------------------------
// export the GrB_Vectors w and s as full arrays, to get direct access to
// their contents. Note that this would fail if w or s are not full, with
// all entries present.
GrB_TRY (GxB_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x,
&w_size, NULL, NULL)) ;
GrB_TRY (GxB_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x,
&s_size, &s_iso, NULL)) ;
if (nthreads >= 4)
{
// allocate a buf array for each thread, of size HASH_SIZE
uint64_t *mem = LAGraph_Malloc (nthreads*HASH_SIZE, sizeof (uint64_t)) ;
// todo: check out-of-memory condition here
// todo why is hashing needed here? hashing is slow for what needs
// to be computed here. GraphBLAS has fast MIN atomic monoids that
// do not require hashing.
ht_init (ht_key, ht_val) ;
ht_sample (index, n, HASH_SAMPLES, ht_key, ht_val, seed) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
// get the thread-specific buf array of size HASH_SIZE
// todo: buf is a bad variable name; it's not a "buffer",
// but a local workspace to compute the local version of w_x.
uint64_t *buf = mem + tid * HASH_SIZE ;
// copy the values from the global hash table into buf
for (int64_t h = 0 ; h < HASH_SIZE ; h++)
{
if (ht_key [h] != -1)
{
buf [h] = w_x [ht_key [h]] ;
}
}
// this thread works on index [kstart:kend]
int64_t kstart = (n * tid + nthreads - 1) / nthreads ;
int64_t kend = (n * tid + n + nthreads - 1) / nthreads ;
for (int64_t k = kstart ; k < kend ; k++)
{
uint64_t i = index [k] ;
// todo: make this loop a static inline function
int64_t h = HASH (i) ;
while (ht_key [h] != -1 && ht_key [h] != i)
{
h = NEXT (h) ;
}
if (ht_key [h] == -1)
{
// todo is this a race condition?
w_x [i] = LAGraph_MIN (w_x [i], s_x [s_iso?0:k]) ;
}
else
{
buf [h] = LAGraph_MIN (buf [h], s_x [s_iso?0:k]) ;
}
}
}
// combine intermediate results from each thread
for (int64_t h = 0 ; h < HASH_SIZE ; h++)
{
int64_t i = ht_key [h] ;
if (i != -1)
{
for (int64_t tid = 0 ; tid < nthreads ; tid++)
{
w_x [i] = LAGraph_MIN (w_x [i], mem [tid * HASH_SIZE + h]) ;
}
}
}
LAGraph_Free ((void **) &mem) ;
}
else
{
// sequential version
for (GrB_Index k = 0 ; k < n ; k++)
{
uint64_t i = index [k] ;
w_x [i] = LAGraph_MIN (w_x [i], s_x [s_iso?0:k]) ;
}
}
//--------------------------------------------------------------------------
// reimport w and s back into GrB_Vectors, and return result
//--------------------------------------------------------------------------
// s is unchanged. It was exported only to compute w (index) += s
GrB_TRY (GxB_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x,
w_size, false, NULL)) ;
GrB_TRY (GxB_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x,
s_size, s_iso, NULL)) ;
return (0) ;
}
#endif
//------------------------------------------------------------------------------
// LG_CC_FastSV5_64
//------------------------------------------------------------------------------
// The output of LG_CC_FastSV5_64 is a vector component, where
// component(i)=s if node i is in the connected component whose
// representative node is node s. If s is a representative, then
// component(s)=s. The number of connected components in the graph G is the
// number of representatives.
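// For example, if G has 5 nodes and the undirected edges {0,1}, {1,2}, and
// {3,4}, there are two components and component = [0 0 0 3 3]; here the
// representative of each component is its smallest node id, since the MIN
// semiring drives the hooking steps.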
#undef LAGraph_FREE_ALL
#define LAGraph_FREE_ALL \
{ \
LAGraph_Free ((void **) &Cp) ; \
LAGraph_Free ((void **) &Cx) ; \
LAGraph_Free ((void **) &V) ; \
LAGraph_Free ((void **) &ht_key) ; \
LAGraph_Free ((void **) &ht_val) ; \
/* todo why is T not freed?? */ \
GrB_free (&t) ; \
GrB_free (&f) ; \
GrB_free (&gp) ; \
GrB_free (&mngp) ; \
GrB_free (&gp_new) ; \
GrB_free (&mod) ; \
}
#endif
int LG_CC_FastSV5_64 // SuiteSparse:GraphBLAS method, with GxB extensions
(
// output
GrB_Vector *component, // component(i)=s if node i is in component s
// inputs
LAGraph_Graph G, // input graph, G->A can change
char *msg
)
{
#if LG_VANILLA
LG_CHECK (0, -1, "SuiteSparse required for this method") ;
#else
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
LG_CLEAR_MSG ;
uint64_t *V = NULL ;
int64_t *ht_key = NULL, *ht_val = NULL ;
GrB_Index n, nnz ;
GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL,
t = NULL ;
GrB_Matrix T = NULL, C = NULL ;
GrB_Index *Cp = NULL ;
GrB_Index Cp_size = 0 ;
bool *Cx = NULL ;
LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ;
LG_CHECK (component == NULL, -1, "component parameter is NULL") ;
if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
(G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
G->A_structure_is_symmetric == LAGRAPH_TRUE))
{
// A must be symmetric
;
}
else
{
// A is not known to be symmetric
LG_CHECK (false, -1, "input must be symmetric") ;
}
GrB_Matrix S = G->A ;
GrB_TRY (GrB_Matrix_nrows (&n, S)) ;
GrB_TRY (GrB_Matrix_nvals (&nnz, S)) ;
#define FASTSV_SAMPLES 4
bool sampling = (n * FASTSV_SAMPLES * 2 < nnz) ;
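// Sampling is worthwhile only when the average degree nnz/n exceeds
// 2*FASTSV_SAMPLES = 8; for sparser graphs the sampled matrix T would be
// nearly as large as A itself.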
// random number seed
uint64_t seed = n ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
// determine # of threads to use
int nthreads ;
LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ;
nthreads = LAGraph_MIN (nthreads, n / 16) ;
nthreads = LAGraph_MAX (nthreads, 1) ;
// vectors
GrB_TRY (GrB_Vector_new (&f, GrB_UINT64, n)) ;
GrB_TRY (GrB_Vector_new (&gp_new, GrB_UINT64, n)) ;
GrB_TRY (GrB_Vector_new (&mod, GrB_BOOL, n)) ;
V = LAGraph_Malloc (n, sizeof (uint64_t)) ;
GrB_TRY (GrB_assign (f, NULL, NULL, 0, GrB_ALL, n, NULL)) ;
GrB_TRY (GrB_apply (f, NULL, NULL, GrB_ROWINDEX_INT64, f, 0, NULL)) ;
GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
GrB_TRY (GrB_Vector_dup (&gp, f)) ;
GrB_TRY (GrB_Vector_dup (&mngp, f)) ;
// allocate the hash table
ht_key = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
ht_val = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
LG_CHECK (ht_key == NULL || ht_val == NULL, -1, "out of memory") ;
// create Cp = 0:n, and Cx = true, and the empty C matrix
GrB_TRY (GrB_Vector_new (&t, GrB_INT64, n+1)) ;
GrB_TRY (GrB_assign (t, NULL, NULL, 0, GrB_ALL, n+1, NULL)) ;
GrB_TRY (GrB_apply (t, NULL, NULL, GrB_ROWINDEX_INT64, t, 0, NULL)) ;
GrB_TRY (GxB_Vector_unpack_Full (t, (void **) &Cp, &Cp_size, NULL, NULL)) ;
Cx = (bool *) LAGraph_Malloc (1, sizeof (bool)) ;
Cx [0] = true ;
GrB_TRY (GrB_free (&t)) ;
GrB_TRY (GrB_Matrix_new (&C, GrB_BOOL, n, n)) ;
//--------------------------------------------------------------------------
// sample phase
//--------------------------------------------------------------------------
if (sampling)
{
//----------------------------------------------------------------------
// export S = G->A in CSR format
//----------------------------------------------------------------------
// S is not modified. It is only exported so that its contents can be
// read by the parallel loops below.
GrB_Type type ;
GrB_Index nrows, ncols, nvals ;
size_t typesize ;
int64_t nonempty ;
GrB_Index *Sp, *Sj ;
void *Sx ;
bool S_jumbled = false ;
GrB_Index Sp_size, Sj_size, Sx_size ;
bool S_iso = false ;
GrB_TRY (GrB_Matrix_nvals (&nvals, S)) ;
GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &Sp, &Sj,
&Sx, &Sp_size, &Sj_size, &Sx_size,
&S_iso, &S_jumbled, NULL)) ;
GrB_TRY (GxB_Type_size (&typesize, type)) ;
G->A = NULL ;
//----------------------------------------------------------------------
// allocate space to construct T
//----------------------------------------------------------------------
GrB_Index Tp_len = nrows+1, Tp_size = Tp_len*sizeof(GrB_Index);
GrB_Index Tj_len = nvals, Tj_size = Tj_len*sizeof(GrB_Index);
GrB_Index Tx_len = nvals ;
GrB_Index *Tp = LAGraph_Malloc (Tp_len, sizeof (GrB_Index)) ;
GrB_Index *Tj = LAGraph_Malloc (Tj_len, sizeof (GrB_Index)) ;
GrB_Index Tx_size = typesize ;
void *Tx = LAGraph_Calloc (1, typesize) ; // T is iso
// todo check out-of-memory conditions
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
int64_t *range = LAGraph_Malloc (nthreads + 1, sizeof (int64_t)) ;
GrB_Index *count = LAGraph_Malloc (nthreads + 1, sizeof (GrB_Index)) ;
// todo check out-of-memory conditions
memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ;
//----------------------------------------------------------------------
// define parallel tasks to construct T
//----------------------------------------------------------------------
// thread tid works on rows range[tid]:range[tid+1]-1 of S and T
for (int tid = 0 ; tid <= nthreads ; tid++)
{
range [tid] = (n * tid + nthreads - 1) / nthreads ;
}
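// For example, with n = 10 and nthreads = 3, range = {0, 4, 7, 10}, so the
// three threads work on rows 0:3, 4:6, and 7:9 of S and T, respectively.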
//----------------------------------------------------------------------
// determine the number of entries to be constructed in T for each thread
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
{
int64_t deg = Sp [i + 1] - Sp [i] ;
count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ;
}
}
//----------------------------------------------------------------------
// count = cumsum (count)
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
count [tid + 1] += count [tid] ;
}
//----------------------------------------------------------------------
// construct T
//----------------------------------------------------------------------
// T (i,:) consists of the first FASTSV_SAMPLES of S (i,:).
// todo: this could be done by GxB_Select, using a new operator. Need
// to define a set of GxB_SelectOp operators that would allow for this.
// Note that Tx is not modified. Only Tp and Tj are constructed.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
GrB_Index p = count [tid] ;
Tp [range [tid]] = p ;
for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
{
// construct T (i,:) from the first entries in S (i,:)
for (int64_t j = 0 ;
j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++)
{
Tj [p++] = Sj [Sp [i] + j] ;
}
Tp [i + 1] = p ;
}
}
//----------------------------------------------------------------------
// import the result into the GrB_Matrix T
//----------------------------------------------------------------------
// Note that Tx is unmodified.
// in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries
GrB_Index Tp_siz = Tp_size ;
GrB_Index Tj_siz = Tj_size ;
GrB_Index Tx_siz = Tx_size ;
GrB_Index t_nvals = Tp [nrows] ;
GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
&Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
true, // T is iso
S_jumbled, NULL)) ;
//----------------------------------------------------------------------
// find the connected components of T
//----------------------------------------------------------------------
// todo: this is nearly identical to the final phase below.
// Make this a function
bool change = true, is_first = true ;
while (change)
{
// hooking & shortcutting
// mngp = min (mngp, T*gp) using the MIN_SECOND semiring
GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;
if (!is_first)
{
#if 1
LAGraph_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;
#else
LAGraph_TRY (Reduce_assign (&f, &mngp, V, n,
nthreads, ht_key, ht_val, &seed, msg)) ;
#endif
}
// f = min (f, mngp, gp)
GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
mngp, gp, NULL)) ;
// calculate grandparent: gp_new = f (f)
GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;
// terminate if gp and gp_new are the same
GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new,
gp, NULL)) ;
GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod,
NULL)) ;
// swap gp and gp_new
GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
is_first = false ;
}
//----------------------------------------------------------------------
// todo: describe me
//----------------------------------------------------------------------
ht_init (ht_key, ht_val) ;
ht_sample (V, n, HASH_SAMPLES, ht_key, ht_val, &seed) ;
int64_t key = ht_most_frequent (ht_key, ht_val) ;
// todo: what if key is returned as -1? Then T below is invalid.
int64_t t_nonempty = -1 ;
bool T_jumbled = false, T_iso = true ;
// export T
GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &Tp, &Tj,
&Tx, &Tp_siz, &Tj_siz, &Tx_siz,
&T_iso, &T_jumbled, NULL)) ;
// todo what is this phase doing? It is constructing a matrix T that
// depends only on S, key, and V. T contains a subset of the entries
// in S, except that T (i,:) is empty if V [i] == key, entries T (i,j)
// with V [j] == key are dropped, and T (i,key) is added if room remains.
// The prior content of T is ignored; it is exported from the earlier
// phase, only to reuse the allocated space for T. However, T_jumbled
// is preserved from the prior matrix T, which doesn't make sense.
// This parallel loop is badly load balanced. Each thread operates on
// the same number of rows of S, regardless of how many entries appear
// in each set of rows. It uses one thread per task, statically
// scheduled.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
GrB_Index ptr = Sp [range [tid]] ;
// thread tid scans S (range [tid]:range [tid+1]-1,:),
// and constructs T(i,:) for all rows in this range.
for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
{
int64_t pv = V [i] ; // pv is the current parent f (i) of node i
Tp [i] = ptr ; // start the construction of T(i,:)
// T(i,:) is empty if pv == key
if (pv != key)
{
// scan S(i,:)
for (GrB_Index p = Sp [i] ; p < Sp [i+1] ; p++)
{
// get S(i,j)
int64_t j = Sj [p] ;
if (V [j] != key)
{
// add the entry T(i,j) to T, but skip it if
// V [j] is equal to key
Tj [ptr++] = j ;
}
}
// add the entry T(i,key) if there is room for it in T(i,:)
if (ptr - Tp [i] < Sp [i+1] - Sp [i])
{
Tj [ptr++] = key ;
}
}
}
// count the number of entries inserted into T by this thread
count [tid] = ptr - Tp [range [tid]] ;
}
// Compact Tj, squeezing out the empty space left by the phase above.
// This is a lot of work and should be done in parallel.
GrB_Index offset = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
memcpy (Tj + offset, Tj + Tp [range [tid]],
sizeof (GrB_Index) * count [tid]) ;
offset += count [tid] ;
count [tid] = offset - count [tid] ;
}
// Compact empty space out of Tp
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
GrB_Index ptr = Tp [range [tid]] ;
for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
{
Tp [i] -= ptr - count [tid] ;
}
}
// finalize T
Tp [n] = offset ;
// free workspace
LAGraph_Free ((void **) &count) ;
LAGraph_Free ((void **) &range) ;
// import S (unchanged since last export)
GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols,
&Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size,
S_iso, S_jumbled, NULL)) ;
// import T for the final phase
GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
&Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
T_iso, T_jumbled, NULL)) ;
// restore G->A
G->A = S ;
}
else
{
// no sampling; the final phase operates on the whole graph
T = S ;
}
//--------------------------------------------------------------------------
// final phase
//--------------------------------------------------------------------------
GrB_TRY (GrB_Matrix_nvals (&nnz, T)) ;
bool change = true ;
while (change && nnz > 0)
{
// hooking & shortcutting
// mngp = min (mngp, T*gp) using the MIN_SECOND semiring
GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;
#if 1
GrB_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;
#else
GrB_TRY (Reduce_assign (&f, &mngp, V, n,
nthreads, ht_key, ht_val, &seed, msg)) ;
#endif
// f = min (f, mngp, gp)
GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
mngp, gp, NULL)) ;
// calculate grandparent: gp_new = f (f)
GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;
// terminate if gp and gp_new are the same
GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new, gp,
NULL)) ;
GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod, NULL)) ;
// swap gp and gp_new
GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*component) = f ;
f = NULL ;
if (sampling)
{
GrB_free (&T) ;
}
LAGraph_FREE_ALL ;
return (0) ;
#endif
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// default sizes (arbitrary choice) so Nx..Nt are defined when arguments are omitted
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// start at 0 so the boundary planes read by the stencil are initialized too
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
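// Each interior point is updated by the order-1 7-point stencil:
//   A[t+1][i][j][k] = alpha*A[t][i][j][k]
//                   + beta*(A[t][i-1][j][k] + A[t][i][j-1][k] + A[t][i][j][k-1]
//                         + A[t][i+1][j][k] + A[t][i][j+1][k] + A[t][i][j][k+1])
// using two time levels selected by t%2 and (t+1)%2.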
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (causes performance degradation, so left disabled)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_convert_hyper_to_sparse.c | //------------------------------------------------------------------------------
// GB_convert_hyper_to_sparse: convert a matrix from hypersparse to sparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// On input, the matrix may have shallow A->p and A->h content; it is safely
// removed. On output, the matrix is always non-hypersparse (even if out of
// memory). If the input matrix is hypersparse, it is given a new A->p that is
// not shallow. If the input matrix is already non-hypersparse, nothing is
// changed (and in that case A->p remains shallow on output if shallow on
// input). The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow/iso property that it had on input.
// If an out-of-memory condition occurs, all content of the matrix is cleared.
// If the input matrix A is sparse, bitmap or full, it is unchanged.
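// For example, a hypersparse matrix with vdim n = 6, Ah = [1 4], and
// Ap = [0 2 3] (vector 1 holds two entries, vector 4 holds one) becomes a
// sparse matrix with Ap_new = [0 0 2 2 2 3 3]: each empty vector repeats the
// pointer of the vector before it, and Ap_new [n] = anz = 3.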
#include "GB.h"
GB_PUBLIC
GrB_Info GB_convert_hyper_to_sparse // convert hypersparse to sparse
(
GrB_Matrix A, // matrix to convert to non-hypersparse
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A being converted from hyper to sparse", GB0) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
//--------------------------------------------------------------------------
// convert A from hypersparse to sparse
//--------------------------------------------------------------------------
if (GB_IS_HYPERSPARSE (A))
{
//----------------------------------------------------------------------
// determine the number of threads to use
//----------------------------------------------------------------------
GBURBLE ("(hyper to sparse) ") ;
int64_t n = A->vdim ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, n) ;
ntasks = GB_IMAX (ntasks, 1) ;
//----------------------------------------------------------------------
// allocate the new Ap array, of size n+1
//----------------------------------------------------------------------
int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ;
Ap_new = GB_MALLOC (n+1, int64_t, &Ap_new_size) ;
if (Ap_new == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
#ifdef GB_DEBUG
// to ensure all values of Ap_new are assigned below.
for (int64_t j = 0 ; j <= n ; j++) Ap_new [j] = -99999 ;
#endif
//----------------------------------------------------------------------
// get the old hyperlist
//----------------------------------------------------------------------
int64_t nvec = A->nvec ; // # of vectors in Ah_old
int64_t *restrict Ap_old = A->p ; // size nvec+1
int64_t *restrict Ah_old = A->h ; // size nvec
int64_t nvec_nonempty = 0 ; // recompute A->nvec_nonempty
int64_t anz = GB_nnz (A) ;
//----------------------------------------------------------------------
// construct the new vector pointers
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nvec_nonempty)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, my_nvec_nonempty = 0 ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
ASSERT (0 <= jstart && jstart <= jend && jend <= n) ;
// task tid computes Ap_new [jstart:jend-1] from Ap_old, Ah_old.
// GB_SPLIT_BINARY_SEARCH of Ah_old [0..nvec-1] for jstart:
// If found is true then Ah_old [k] == jstart.
// If found is false, and nvec > 0 then
// Ah_old [0 ... k-1] < jstart < Ah_old [k ... nvec-1]
// Whether or not jstart is found, if nvec > 0
// Ah_old [0 ... k-1] < jstart <= Ah_old [k ... nvec-1]
// If nvec == 0, then k == 0 and found will be false. In this
// case, jstart cannot be compared with any content of Ah_old,
// since Ah_old is completely empty (Ah_old [0] is invalid).
int64_t k = 0, pright = nvec-1 ;
bool found ;
GB_SPLIT_BINARY_SEARCH (jstart, Ah_old, k, pright, found) ;
ASSERT (k >= 0 && k <= nvec) ;
ASSERT (GB_IMPLIES (nvec == 0, !found && k == 0)) ;
ASSERT (GB_IMPLIES (found, jstart == Ah_old [k])) ;
ASSERT (GB_IMPLIES (!found && k < nvec, jstart < Ah_old [k])) ;
// Let jk = Ah_old [k], jlast = Ah_old [k-1], and pk = Ap_old [k].
// Then Ap_new [jlast+1:jk] must be set to pk. This must be done
// for all k = 0:nvec-1. In addition, the last vector k=nvec-1
// must be terminated by setting Ap_new [jk+1:n-1] to Ap_old [nvec].
// A task owns the kth vector if jk is in jstart:jend-1, inclusive.
// It counts all non-empty vectors that it owns. However, the task
// must also set Ap_new [...] = pk for any jlast+1:jk that overlaps
// jstart:jend-1, even if it does not own that particular vector k.
// This happens only at the tail end of jstart:jend-1.
int64_t jlast = (k == 0) ? (-1) : Ah_old [k-1] ;
jlast = GB_IMAX (jstart-1, jlast) ;
bool done = false ;
for ( ; k <= nvec && !done ; k++)
{
//--------------------------------------------------------------
// get the kth vector in Ah_old, which is vector index jk.
//--------------------------------------------------------------
int64_t jk = (k < nvec) ? Ah_old [k] : n ;
int64_t pk = (k < nvec) ? Ap_old [k] : anz ;
//--------------------------------------------------------------
// determine if this task owns jk
//--------------------------------------------------------------
int64_t jfin ;
if (jk >= jend)
{
// This is the last iteration for this task. This task
// does not own the kth vector. However, it does own the
// vector indices jlast+1:jend-1, and these vectors must
// be handled by this task.
jfin = jend - 1 ;
done = true ;
}
else
{
// This task owns the kth vector, which is vector index jk.
// Ap must be set to pk for all vector indices jlast+1:jk.
jfin = jk ;
ASSERT (k >= 0 && k < nvec && nvec > 0) ;
if (pk < Ap_old [k+1]) my_nvec_nonempty++ ;
}
//--------------------------------------------------------------
// set Ap_new for this vector
//--------------------------------------------------------------
// Ap_new [jlast+1:jk] must be set to pk. This tasks handles
// the intersection of jlast+1:jk with jstart:jend-1.
for (int64_t j = jlast+1 ; j <= jfin ; j++)
{
Ap_new [j] = pk ;
}
//--------------------------------------------------------------
// keep track of the prior vector index
//--------------------------------------------------------------
jlast = jk ;
}
nvec_nonempty += my_nvec_nonempty ;
//------------------------------------------------------------------
// no task owns Ap_new [n] so it is set by the last task
//------------------------------------------------------------------
if (tid == ntasks-1)
{
ASSERT (jend == n) ;
Ap_new [n] = anz ;
}
}
// free the old A->p and A->h hyperlist content.
// this clears A->nvec_nonempty so it must be restored below.
GB_ph_free (A) ;
// transplant the new vector pointers; matrix is no longer hypersparse
A->p = Ap_new ; A->p_size = Ap_new_size ;
A->h = NULL ;
A->nvec = n ;
A->nvec_nonempty = nvec_nonempty ;
A->plen = n ;
A->p_shallow = false ;
A->h_shallow = false ;
A->magic = GB_MAGIC ;
ASSERT (anz == GB_nnz (A)) ;
//----------------------------------------------------------------------
// A is now sparse
//----------------------------------------------------------------------
ASSERT (GB_IS_SPARSE (A)) ;
}
//--------------------------------------------------------------------------
// A is now in sparse form (or left as full or bitmap)
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A converted to sparse (or left as-is)", GB0) ;
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
return (GrB_SUCCESS) ;
}
|
mixed_tentusscher_myo_epi_2004_S1_2.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_2.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
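// A minimal sketch (not part of the model) of how a caller might build the
// extra_data mapping consumed above: mapping[i] == 0 marks cell i as
// myocardium, any nonzero value as epicardium. The helper
// is_epicardium_cell is hypothetical.
//
//   uint32_t *mapping = (uint32_t *) malloc (num_cells * sizeof (uint32_t));
//   for (uint32_t i = 0; i < num_cells; i++)
//       mapping[i] = is_epicardium_cell (i) ? 1 : 0;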
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
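// The gating variables are advanced with the exponential (Rush-Larsen)
// scheme: y(t+dt) = y_inf - (y_inf - y(t))*exp(-dt/tau), which integrates
// the linear gate equation dy/dt = (y_inf - y)/tau exactly over one step.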
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
irbuilder_for_unsigned_dynamic.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@workshareloop_unsigned_dynamic(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: store i32 1, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: store i32 %[[DOTCOUNT]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 1073741859, i32 1, i32 %[[DOTCOUNT]], i32 1, i32 1)
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER_OUTER_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER:.*]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ %[[LB:.+]], %[[OMP_LOOP_PREHEADER_OUTER_COND]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[UB]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_PREHEADER_OUTER_COND]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT:.*]]:
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER_OUTER_COND]]:
// CHECK-NEXT: %[[TMP14:.+]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]])
// CHECK-NEXT: %[[TMP15:.+]] = icmp ne i32 %[[TMP14]], 0
// CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[LB]] = sub i32 %[[TMP16]], 1
// CHECK-NEXT: br i1 %[[TMP15]], label %[[OMP_LOOP_HEADER]], label %[[OMP_LOOP_EXIT]]
// CHECK-NEXT: }
extern "C" void workshareloop_unsigned_dynamic(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(dynamic)
for (unsigned i = 33; i < 32000000; i += 7) {
a[i] = b[i] * c[i] * d[i];
}
}
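// Note how the OpenMPIRBuilder canonicalises the loop: __captured_stmt
// computes the trip count ceil((32000000 - 33) / 7) = 4571424, and
// __captured_stmt.1 maps each logical iteration back to the user's
// induction variable as i = 33 + 7 * logical (see the checks below).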
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45}
// CHECK: ![[META2:[0-9]+]] =
|
convolution_sgemm_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_packnto1_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
// Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u * packn, packn, opt.workspace_allocator);
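// Columns of the im2col matrix are repacked into tiles of 8, 4, 2 and 1 so
// the GEMM kernels below read contiguously. A tile starting at column i
// lives in channel i/8 + (i%8)/4 + (i%4)/2 + i%2 of tmp (the number of
// tiles before it); e.g. size = 11 gives tiles [0..7] -> channel 0,
// [8..9] -> channel 1, [10] -> channel 2.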
{
int remain_size_start = 0;
int nn_size = size >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr[4] = img0[l + packn * 4];
tmpptr[5] = img0[l + packn * 5];
tmpptr[6] = img0[l + packn * 6];
tmpptr[7] = img0[l + packn * 7];
tmpptr += 8;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(img0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(img0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(img0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(img0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
img0 += size * packn;
tmpptr += packn * 8;
#endif
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr += 4;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
img0 += size * packn;
tmpptr += packn * 4;
#endif
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr += 2;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
img0 += size * packn;
tmpptr += packn * 2;
#endif
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
vfloat32m1_t _val = vle32_v_f32m1(img0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
img0 += size * packn;
tmpptr += packn;
}
}
}
}
int nn_outch = outch / packn;
int remain_outch_start = nn_outch * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * packn;
float* outptr0 = top_blob.channel(p);
// Zero-bias fallback; a runtime-sized array cannot take an initializer,
// so use a vector (as the RVV_SPEC_0_7 paths below already do).
std::vector<float> zeros(packn, 0.f);
const float* biasptr = bias ? bias + p : zeros.data();
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum4 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum5 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum6 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum7 = vle32_v_f32m1(biasptr, vl);
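// Inner-product loop: each step broadcasts one im2col scalar per output
// column and accumulates it against a vector of packn output-channel
// weights (_sumN += valN * _w0), an outer-product-style FMA.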
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
float val4 = *tmpptr++;
float val5 = *tmpptr++;
float val6 = *tmpptr++;
float val7 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
kptr0 += packn;
}
#if RVV_SPEC_0_7
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
vsse32_v_f32m1(outptr0 + 2, top_blob.cstep * sizeof(float), _sum2, vl);
vsse32_v_f32m1(outptr0 + 3, top_blob.cstep * sizeof(float), _sum3, vl);
vsse32_v_f32m1(outptr0 + 4, top_blob.cstep * sizeof(float), _sum4, vl);
vsse32_v_f32m1(outptr0 + 5, top_blob.cstep * sizeof(float), _sum5, vl);
vsse32_v_f32m1(outptr0 + 6, top_blob.cstep * sizeof(float), _sum6, vl);
vsse32_v_f32m1(outptr0 + 7, top_blob.cstep * sizeof(float), _sum7, vl);
#else
vssseg8e32_v_f32m1x8(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x8(_sum0, _sum1, _sum2, _sum3, _sum4, _sum5, _sum6, _sum7), vl);
#endif
outptr0 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
kptr0 += packn;
}
#if RVV_SPEC_0_7
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
vsse32_v_f32m1(outptr0 + 2, top_blob.cstep * sizeof(float), _sum2, vl);
vsse32_v_f32m1(outptr0 + 3, top_blob.cstep * sizeof(float), _sum3, vl);
#else
vssseg4e32_v_f32m1x4(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x4(_sum0, _sum1, _sum2, _sum3), vl);
#endif
outptr0 += 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
kptr0 += packn;
}
#if RVV_SPEC_0_7
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
#else
vssseg2e32_v_f32m1x2(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x2(_sum0, _sum1), vl);
#endif
outptr0 += 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
kptr0 += packn;
}
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum, vl);
outptr0 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
float sum4 = bias0;
float sum5 = bias0;
float sum6 = bias0;
float sum7 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x8_t _val01 = vlseg8e32_v_f32m1x8(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x8_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x8_f32m1(_val01, 1), _w0, vl);
_sum2 = vfmacc_vv_f32m1(_sum2, vget_f32m1x8_f32m1(_val01, 2), _w0, vl);
_sum3 = vfmacc_vv_f32m1(_sum3, vget_f32m1x8_f32m1(_val01, 3), _w0, vl);
_sum4 = vfmacc_vv_f32m1(_sum4, vget_f32m1x8_f32m1(_val01, 4), _w0, vl);
_sum5 = vfmacc_vv_f32m1(_sum5, vget_f32m1x8_f32m1(_val01, 5), _w0, vl);
_sum6 = vfmacc_vv_f32m1(_sum6, vget_f32m1x8_f32m1(_val01, 6), _w0, vl);
_sum7 = vfmacc_vv_f32m1(_sum7, vget_f32m1x8_f32m1(_val01, 7), _w0, vl);
tmpptr += packn * 8;
kptr0 += packn;
}
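// _sumN now holds packn partial sums per output column; reduce each to a
// scalar and add the bias, either with vfredsum or, on the RVV 0.7 spec,
// by spilling to a buffer and summing on the scalar core.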
#ifdef RVV_SPEC_0_7
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
std::vector<float> ss2(packn);
std::vector<float> ss3(packn);
std::vector<float> ss4(packn);
std::vector<float> ss5(packn);
std::vector<float> ss6(packn);
std::vector<float> ss7(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
vse32_v_f32m1((float*)ss2.data(), _sum2, vl);
vse32_v_f32m1((float*)ss3.data(), _sum3, vl);
vse32_v_f32m1((float*)ss4.data(), _sum4, vl);
vse32_v_f32m1((float*)ss5.data(), _sum5, vl);
vse32_v_f32m1((float*)ss6.data(), _sum6, vl);
vse32_v_f32m1((float*)ss7.data(), _sum7, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
sum2 += ss2[i];
sum3 += ss3[i];
sum4 += ss4[i];
sum5 += ss5[i];
sum6 += ss6[i];
sum7 += ss7[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
sum2 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum2, vfmv_s_f_f32m1(vfloat32m1_t(), sum2, vl), vl));
sum3 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum3, vfmv_s_f_f32m1(vfloat32m1_t(), sum3, vl), vl));
sum4 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum4, vfmv_s_f_f32m1(vfloat32m1_t(), sum4, vl), vl));
sum5 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum5, vfmv_s_f_f32m1(vfloat32m1_t(), sum5, vl), vl));
sum6 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum6, vfmv_s_f_f32m1(vfloat32m1_t(), sum6, vl), vl));
sum7 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum7, vfmv_s_f_f32m1(vfloat32m1_t(), sum7, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x4_t _val01 = vlseg4e32_v_f32m1x4(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x4_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x4_f32m1(_val01, 1), _w0, vl);
_sum2 = vfmacc_vv_f32m1(_sum2, vget_f32m1x4_f32m1(_val01, 2), _w0, vl);
_sum3 = vfmacc_vv_f32m1(_sum3, vget_f32m1x4_f32m1(_val01, 3), _w0, vl);
tmpptr += packn * 4;
kptr0 += packn;
}
#ifdef RVV_SPEC_0_7
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
std::vector<float> ss2(packn);
std::vector<float> ss3(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
vse32_v_f32m1((float*)ss2.data(), _sum2, vl);
vse32_v_f32m1((float*)ss3.data(), _sum3, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
sum2 += ss2[i];
sum3 += ss3[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
sum2 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum2, vfmv_s_f_f32m1(vfloat32m1_t(), sum2, vl), vl));
sum3 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum3, vfmv_s_f_f32m1(vfloat32m1_t(), sum3, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x2_t _val01 = vlseg2e32_v_f32m1x2(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x2_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x2_f32m1(_val01, 1), _w0, vl);
tmpptr += packn * 2;
kptr0 += packn;
}
#ifdef RVV_SPEC_0_7
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0 += 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1_t _val0 = vle32_v_f32m1(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _val0, _w0, vl);
tmpptr += packn;
kptr0 += packn;
}
#ifdef RVV_SPEC_0_7
// TODO
std::vector<float> ss0(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
#endif
outptr0[0] = sum0;
outptr0 += 1;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_packnto1_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int packn = csrr_vlenb() / 4;
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = pb-pa-maxk-inch/pa-outch/pb
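// i.e. for each kernel position k, a packn x packn weight block is stored
// with the output channel (pb) varying fastest, then the input channel
// (pa): w[q+0][p+i][k], w[q+1][p+i][k], ..., w[q+packn-1][p+i][k].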
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(packn * packn * maxk, inch / packn, outch / packn + outch % packn, 4u);
int q = 0;
for (; q + (packn - 1) < outch; q += packn)
{
float* g00 = kernel_tm.channel(q / packn);
for (int p = 0; p + (packn - 1) < inch; p += packn)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < packn; i++)
{
for (int j = 0; j < packn; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
float* g00 = kernel_tm.channel(q / packn + q % packn);
for (int p = 0; p + (packn - 1) < inch; p += packn)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < packn; j++)
{
const float* k00 = k0.row(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
static void convolution_im2col_sgemm_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * packn;
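// gap jumps from the end of one output row to the first sample of the
// next: stride_h input rows down, minus the outw*stride_w columns already
// consumed, in packed (packn-wide) floats.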
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
float* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const float* sptr = img.row(dilation_h * u) + dilation_w * v * packn;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
vse32_v_f32m1(ptr, _val, vl);
sptr += stride_w * packn;
ptr += packn;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_packnto1_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
smg_residual_unrolled.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.13 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
* hypre_SMGResidualData data structure
*--------------------------------------------------------------------------*/
typedef struct
{
hypre_Index base_index;
hypre_Index base_stride;
hypre_StructMatrix *A;
hypre_StructVector *x;
hypre_StructVector *b;
hypre_StructVector *r;
hypre_BoxArray *base_points;
hypre_ComputePkg *compute_pkg;
HYPRE_Int time_index;
HYPRE_Int flops;
} hypre_SMGResidualData;
/*--------------------------------------------------------------------------
* hypre_SMGResidualCreate
*--------------------------------------------------------------------------*/
void *
hypre_SMGResidualCreate( )
{
hypre_SMGResidualData *residual_data;
residual_data = hypre_CTAlloc(hypre_SMGResidualData, 1);
(residual_data -> time_index) = hypre_InitializeTiming("SMGResidual");
/* set defaults */
hypre_SetIndex((residual_data -> base_index), 0, 0, 0);
hypre_SetIndex((residual_data -> base_stride), 1, 1, 1);
return (void *) residual_data;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualSetup( void *residual_vdata,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *b,
hypre_StructVector *r )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
hypre_IndexRef base_index = (residual_data -> base_index);
hypre_IndexRef base_stride = (residual_data -> base_stride);
hypre_Index unit_stride;
hypre_StructGrid *grid;
hypre_StructStencil *stencil;
hypre_BoxArray *base_points;
hypre_ComputeInfo *compute_info;
hypre_ComputePkg *compute_pkg;
/*----------------------------------------------------------
* Set up base points and the compute package
*----------------------------------------------------------*/
grid = hypre_StructMatrixGrid(A);
stencil = hypre_StructMatrixStencil(A);
hypre_SetIndex(unit_stride, 1, 1, 1);
base_points = hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid));
hypre_ProjectBoxArray(base_points, base_index, base_stride);
hypre_CreateComputeInfo(grid, stencil, &compute_info);
hypre_ComputeInfoProjectComp(compute_info, base_index, base_stride);
hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
grid, &compute_pkg);
/*----------------------------------------------------------
* Set up the residual data structure
*----------------------------------------------------------*/
(residual_data -> A) = hypre_StructMatrixRef(A);
(residual_data -> x) = hypre_StructVectorRef(x);
(residual_data -> b) = hypre_StructVectorRef(b);
(residual_data -> r) = hypre_StructVectorRef(r);
(residual_data -> base_points) = base_points;
(residual_data -> compute_pkg) = compute_pkg;
/*-----------------------------------------------------
* Compute flops
*-----------------------------------------------------*/
(residual_data -> flops) =
(hypre_StructMatrixGlobalSize(A) + hypre_StructVectorGlobalSize(x)) /
(hypre_IndexX(base_stride) *
hypre_IndexY(base_stride) *
hypre_IndexZ(base_stride) );
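/* Roughly one multiply-subtract per matrix entry plus one copy per
vector entry, scaled down by the base stride volume when the residual
is only computed on a sub-lattice. */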
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidual
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidual( void *residual_vdata,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *b,
hypre_StructVector *r )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
hypre_IndexRef base_stride = (residual_data -> base_stride);
hypre_BoxArray *base_points = (residual_data -> base_points);
hypre_ComputePkg *compute_pkg = (residual_data -> compute_pkg);
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_data_box;
hypre_Box *x_data_box;
hypre_Box *b_data_box;
hypre_Box *r_data_box;
HYPRE_Int Ai;
HYPRE_Int xi;
HYPRE_Int bi;
HYPRE_Int ri;
double *Ap0;
double *xp0;
double *bp;
double *rp;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
HYPRE_Int compute_i, i, j, si;
double *Ap1, *Ap2;
double *Ap3, *Ap4;
double *Ap5, *Ap6;
double *Ap7, *Ap8, *Ap9;
double *Ap10, *Ap11, *Ap12, *Ap13, *Ap14;
double *Ap15, *Ap16, *Ap17, *Ap18;
double *Ap19, *Ap20, *Ap21, *Ap22, *Ap23, *Ap24, *Ap25, *Ap26;
double *xp1, *xp2;
double *xp3, *xp4;
double *xp5, *xp6;
double *xp7, *xp8, *xp9;
double *xp10, *xp11, *xp12, *xp13, *xp14;
double *xp15, *xp16, *xp17, *xp18;
double *xp19, *xp20, *xp21, *xp22, *xp23, *xp24, *xp25, *xp26;
hypre_BeginTiming(residual_data -> time_index);
/*-----------------------------------------------------------------------
* Compute residual r = b - Ax
*-----------------------------------------------------------------------*/
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
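/* Two passes: pass 0 posts the halo exchange for x and works on the boxes
that need no off-process data (also copying b into r); pass 1 waits for
the exchange and works on the dependent boxes, overlapping communication
with computation. */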
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp0 = hypre_StructVectorData(x);
hypre_InitializeIndtComputations(compute_pkg, xp0, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
/*----------------------------------------
* Copy b into r
*----------------------------------------*/
compute_box_a = base_points;
hypre_ForBoxI(i, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, i);
start = hypre_BoxIMin(compute_box);
b_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
r_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);
bp = hypre_StructVectorBoxData(b, i);
rp = hypre_StructVectorBoxData(r, i);
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
b_data_box, start, base_stride, bi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(bi, ri)
{
rp[ri] = bp[bi];
}
hypre_BoxLoop2End(bi, ri);
}
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
/*--------------------------------------------------------------------
* Compute r -= A*x
*--------------------------------------------------------------------*/
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
r_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);
rp = hypre_StructVectorBoxData(r, i);
/*--------------------------------------------------------------
* Switch statement to direct control (based on stencil size) to
* code to get pointers and offsets for A and x.
*--------------------------------------------------------------*/
switch (stencil_size)
{
case 1:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
break;
case 3:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
break;
case 5:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
break;
case 7:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
break;
case 9:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
break;
case 15:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
break;
case 19:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
Ap15 = hypre_StructMatrixBoxData(A, i, 15);
Ap16 = hypre_StructMatrixBoxData(A, i, 16);
Ap17 = hypre_StructMatrixBoxData(A, i, 17);
Ap18 = hypre_StructMatrixBoxData(A, i, 18);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
xp15 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[15]);
xp16 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[16]);
xp17 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[17]);
xp18 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[18]);
break;
case 27:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
Ap15 = hypre_StructMatrixBoxData(A, i, 15);
Ap16 = hypre_StructMatrixBoxData(A, i, 16);
Ap17 = hypre_StructMatrixBoxData(A, i, 17);
Ap18 = hypre_StructMatrixBoxData(A, i, 18);
Ap19 = hypre_StructMatrixBoxData(A, i, 19);
Ap20 = hypre_StructMatrixBoxData(A, i, 20);
Ap21 = hypre_StructMatrixBoxData(A, i, 21);
Ap22 = hypre_StructMatrixBoxData(A, i, 22);
Ap23 = hypre_StructMatrixBoxData(A, i, 23);
Ap24 = hypre_StructMatrixBoxData(A, i, 24);
Ap25 = hypre_StructMatrixBoxData(A, i, 25);
Ap26 = hypre_StructMatrixBoxData(A, i, 26);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
xp15 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[15]);
xp16 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[16]);
xp17 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[17]);
xp18 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[18]);
xp19 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[19]);
xp20 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[20]);
xp21 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[21]);
xp22 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[22]);
xp23 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[23]);
xp24 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[24]);
xp25 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[25]);
xp26 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[26]);
break;
default:
;
}
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
/*------------------------------------------------------
* Switch statement to direct control to appropriate
* box loop depending on stencil size
*------------------------------------------------------*/
switch (stencil_size)
{
case 1:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 3:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 5:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 7:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 9:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 15:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 19:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi]
- Ap15[Ai] * xp15[xi]
- Ap16[Ai] * xp16[xi]
- Ap17[Ai] * xp17[xi]
- Ap18[Ai] * xp18[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 27:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi]
- Ap15[Ai] * xp15[xi]
- Ap16[Ai] * xp16[xi]
- Ap17[Ai] * xp17[xi]
- Ap18[Ai] * xp18[xi]
- Ap19[Ai] * xp19[xi]
- Ap20[Ai] * xp20[xi]
- Ap21[Ai] * xp21[xi]
- Ap22[Ai] * xp22[xi]
- Ap23[Ai] * xp23[xi]
- Ap24[Ai] * xp24[xi]
- Ap25[Ai] * xp25[xi]
- Ap26[Ai] * xp26[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
default:
for (si = 0; si < stencil_size; si++)
{
Ap0 = hypre_StructMatrixBoxData(A, i, si);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[si]);
hypre_BoxGetStrideSize(compute_box, base_stride,
loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] -= Ap0[Ai] * xp0[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
}
}
}
}
}
/*-----------------------------------------------------------------------
* Return
*-----------------------------------------------------------------------*/
hypre_IncFLOPCount(residual_data -> flops);
hypre_EndTiming(residual_data -> time_index);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualSetBase
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualSetBase( void *residual_vdata,
hypre_Index base_index,
hypre_Index base_stride )
{
hypre_SMGResidualData *residual_data = residual_vdata;
HYPRE_Int d;
HYPRE_Int ierr = 0;
for (d = 0; d < 3; d++)
{
hypre_IndexD((residual_data -> base_index), d)
= hypre_IndexD(base_index, d);
hypre_IndexD((residual_data -> base_stride), d)
= hypre_IndexD(base_stride, d);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualDestroy( void *residual_vdata )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
if (residual_data)
{
hypre_StructMatrixDestroy(residual_data -> A);
hypre_StructVectorDestroy(residual_data -> x);
hypre_StructVectorDestroy(residual_data -> b);
hypre_StructVectorDestroy(residual_data -> r);
hypre_BoxArrayDestroy(residual_data -> base_points);
hypre_ComputePkgDestroy(residual_data -> compute_pkg );
hypre_FinalizeTiming(residual_data -> time_index);
hypre_TFree(residual_data);
}
return ierr;
}
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef FLANN_NNINDEX_H
#define FLANN_NNINDEX_H
#include <vector>
#include "flann/general.h"
#include "flann/util/matrix.h"
#include "flann/util/params.h"
#include "flann/util/result_set.h"
#include "flann/util/dynamic_bitset.h"
#include "flann/util/saving.h"
namespace flann
{
#define KNN_HEAP_THRESHOLD 250
class IndexBase
{
public:
virtual ~IndexBase() {}
virtual size_t veclen() const = 0;
virtual size_t size() const = 0;
virtual flann_algorithm_t getType() const = 0;
virtual int usedMemory() const = 0;
virtual IndexParams getParameters() const = 0;
virtual void loadIndex(FILE* stream) = 0;
virtual void saveIndex(FILE* stream) = 0;
};
/**
* Nearest-neighbour index base class
*/
template <typename Distance>
class NNIndex : public IndexBase
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
NNIndex(const NNIndex& other) :
distance_(other.distance_),
last_id_(other.last_id_),
size_(other.size_),
size_at_build_(other.size_at_build_),
veclen_(other.veclen_),
index_params_(other.index_params_),
removed_(other.removed_),
removed_points_(other.removed_points_),
removed_count_(other.removed_count_),
ids_(other.ids_),
points_(other.points_),
data_ptr_(NULL)
{
if (other.data_ptr_) {
data_ptr_ = new ElementType[size_*veclen_];
std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
}
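    /* Note (annotation, not in the original header): the copy constructor
     * deep-copies the owned buffer and re-targets points_ into the new
     * allocation; an index that merely wraps caller-owned data
     * (data_ptr_ == NULL) keeps sharing the caller's memory after a copy. */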
virtual ~NNIndex()
{
if (data_ptr_) {
delete[] data_ptr_;
}
}
virtual NNIndex* clone() const = 0;
/**
* Builds the index
*/
virtual void buildIndex()
{
freeIndex();
cleanRemovedPoints();
// building index
buildIndexImpl();
size_at_build_ = size_;
}
/**
* Builds the index using the specified dataset
* @param dataset the dataset to use
*/
virtual void buildIndex(const Matrix<ElementType>& dataset)
{
setDataset(dataset);
this->buildIndex();
}
/**
* @brief Incrementally add points to the index.
* @param points Matrix with points to be added
* @param rebuild_threshold
*/
virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
throw FLANNException("Functionality not supported by this index");
}
/**
* Remove point from the index
* @param index Index of point to be removed
*/
virtual void removePoint(size_t id)
{
if (!removed_) {
ids_.resize(size_);
for (size_t i=0;i<size_;++i) {
ids_[i] = i;
}
removed_points_.resize(size_);
removed_points_.reset();
last_id_ = size_;
removed_ = true;
}
size_t point_index = id_to_index(id);
if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
removed_points_.set(point_index);
removed_count_++;
}
}
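    /* Illustrative sketch (annotation, not part of FLANN): removal is lazy.
     * The first removePoint() call materializes ids_ as the identity map and
     * an all-clear bitset; later calls only set one bit and bump the count.
     * Assuming an index already built from three points:
     *
     *   index.removePoint(1);  // removed_points_ marks slot 1
     *   index.size();          // now reports 2 (size_ - removed_count_)
     *   index.getPoint(1);     // raw data still reachable until compaction
     *
     * The point is physically dropped only when cleanRemovedPoints() runs,
     * e.g. at the start of buildIndex(). */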
/**
* Get point with specific id
* @param id
* @return
*/
virtual ElementType* getPoint(size_t id)
{
size_t index = id_to_index(id);
if (index!=size_t(-1)) {
return points_[index];
}
else {
return NULL;
}
}
/**
* @return number of features in this index.
*/
inline size_t size() const
{
return size_ - removed_count_;
}
/**
* @return The dimensionality of the features in this index.
*/
inline size_t veclen() const
{
return veclen_;
}
/**
* Returns the parameters used by the index.
*
* @return The index parameters
*/
IndexParams getParameters() const
{
return index_params_;
}
template<typename Archive>
void serialize(Archive& ar)
{
IndexHeader header;
if (Archive::is_saving::value) {
header.h.data_type = flann_datatype_value<ElementType>::value;
header.h.index_type = getType();
header.h.rows = size_;
header.h.cols = veclen_;
}
ar & header;
// sanity checks
if (Archive::is_loading::value) {
if (strncmp(header.h.signature,
FLANN_SIGNATURE_,
strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) {
throw FLANNException("Invalid index file, wrong signature");
}
if (header.h.data_type != flann_datatype_value<ElementType>::value) {
throw FLANNException("Datatype of saved index is different than of the one to be created.");
}
if (header.h.index_type != getType()) {
throw FLANNException("Saved index type is different then the current index type.");
}
// TODO: check for distance type
}
ar & size_;
ar & veclen_;
ar & size_at_build_;
bool save_dataset;
if (Archive::is_saving::value) {
save_dataset = get_param(index_params_,"save_dataset", false);
}
ar & save_dataset;
if (save_dataset) {
if (Archive::is_loading::value) {
if (data_ptr_) {
delete[] data_ptr_;
}
data_ptr_ = new ElementType[size_*veclen_];
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
for (size_t i=0;i<size_;++i) {
ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
}
} else {
if (points_.size()!=size_) {
throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
}
}
ar & last_id_;
ar & ids_;
ar & removed_;
if (removed_) {
ar & removed_points_;
}
ar & removed_count_;
}
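    /* Note (annotation): serialize() doubles as both writer and reader --
     * Archive::is_saving / Archive::is_loading select the direction at
     * compile time, so the field order in this function *is* the on-disk
     * format; reordering the "ar & ..." statements would silently break
     * previously saved indices. */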
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn > KNN_HEAP_THRESHOLD);
}
else {
use_heap = (params.use_heap == FLANN_True);
}
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
return count;
}
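    /* Hedged usage sketch (annotation; the float element type and the
     * SearchParams value are assumptions, not mandated by this header):
     *
     *   flann::Matrix<float> queries(qdata, nq, dim);
     *   flann::Matrix<size_t> indices(new size_t[nq*knn], nq, knn);
     *   flann::Matrix<float> dists(new float[nq*knn], nq, knn);
     *   index.knnSearch(queries, indices, dists, knn, SearchParams(128));
     *   // after the call, indices holds point ids (indices_to_ids applied),
     *   // which differ from raw dataset offsets once points were removed
     *   delete[] indices.ptr();
     *   delete[] dists.ptr();
     */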
/**
* @brief Same as the size_t overload above, but with int indices;
* results are computed via the size_t version and then copied.
*/
int knnSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = knnSearch(queries, indices_, dists, knn, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn > KNN_HEAP_THRESHOLD);
}
else {
use_heap = (params.use_heap == FLANN_True);
}
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
return count;
}
/**
* @brief Same as the size_t overload above, but with int indices;
* results are computed via the size_t version and then copied.
*/
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = knnSearch(queries, indices_, dists, knn, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
if (max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
return count;
}
/**
* @brief Same as the size_t overload above, but with int indices;
* results are computed via the size_t version and then copied.
*/
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = radiusSearch(queries, indices_, dists, radius, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// just count neighbors
if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
return count;
}
/**
* @brief Same as the size_t overload above, but with int indices;
* results are computed via the size_t version and then copied.
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = radiusSearch(queries, indices_, dists, radius, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;
protected:
virtual void freeIndex() = 0;
virtual void buildIndexImpl() = 0;
size_t id_to_index(size_t id)
{
if (ids_.size()==0) {
return id;
}
size_t point_index = size_t(-1);
if (id < ids_.size() && ids_[id]==id) {
return id;
}
else {
// binary search
size_t start = 0;
size_t end = ids_.size();
while (start<end) {
size_t mid = (start+end)/2;
if (ids_[mid]==id) {
point_index = mid;
break;
}
else if (ids_[mid]<id) {
start = mid + 1;
}
else {
end = mid;
}
}
}
return point_index;
}
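    /* Note (annotation): the binary search above relies on ids_ being sorted
     * in ascending order, which holds because ids are assigned sequentially
     * in extendDataset() and cleanRemovedPoints() compacts ids_ while
     * preserving order; lookup therefore stays O(log n) after any mix of
     * additions and removals. */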
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
if (removed_) {
for (size_t i=0;i<size;++i) {
out[i] = ids_[in[i]];
}
}
}
void setDataset(const Matrix<ElementType>& dataset)
{
size_ = dataset.rows;
veclen_ = dataset.cols;
last_id_ = 0;
ids_.clear();
removed_points_.clear();
removed_ = false;
removed_count_ = 0;
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = dataset[i];
}
}
void extendDataset(const Matrix<ElementType>& new_points)
{
size_t new_size = size_ + new_points.rows;
if (removed_) {
removed_points_.resize(new_size);
ids_.resize(new_size);
}
points_.resize(new_size);
for (size_t i=size_;i<new_size;++i) {
points_[i] = new_points[i-size_];
if (removed_) {
ids_[i] = last_id_++;
removed_points_.reset(i);
}
}
size_ = new_size;
}
void cleanRemovedPoints()
{
if (!removed_) return;
size_t last_idx = 0;
for (size_t i=0;i<size_;++i) {
if (!removed_points_.test(i)) {
points_[last_idx] = points_[i];
ids_[last_idx] = ids_[i];
removed_points_.reset(last_idx);
++last_idx;
}
}
points_.resize(last_idx);
ids_.resize(last_idx);
removed_points_.resize(last_idx);
size_ = last_idx;
removed_count_ = 0;
}
void swap(NNIndex& other)
{
std::swap(distance_, other.distance_);
std::swap(last_id_, other.last_id_);
std::swap(size_, other.size_);
std::swap(size_at_build_, other.size_at_build_);
std::swap(veclen_, other.veclen_);
std::swap(index_params_, other.index_params_);
std::swap(removed_, other.removed_);
std::swap(removed_points_, other.removed_points_);
std::swap(removed_count_, other.removed_count_);
std::swap(ids_, other.ids_);
std::swap(points_, other.points_);
std::swap(data_ptr_, other.data_ptr_);
}
protected:
/**
* The distance functor
*/
Distance distance_;
/**
* Each index point has an associated ID. IDs are assigned sequentially in
* increasing order. This indicates the ID assigned to the last point added to the
* index.
*/
size_t last_id_;
/**
* Number of points in the index (and database)
*/
size_t size_;
/**
* Number of features in the dataset when the index was last built.
*/
size_t size_at_build_;
/**
* Size of one point in the index (and database)
*/
size_t veclen_;
/**
* Parameters of the index.
*/
IndexParams index_params_;
/**
* Flag indicating if at least a point was removed from the index
*/
bool removed_;
/**
* Array used to mark points removed from the index
*/
DynamicBitset removed_points_;
/**
* Number of points removed from the index
*/
size_t removed_count_;
/**
* Array of point IDs, returned by nearest-neighbour operations
*/
std::vector<size_t> ids_;
/**
* Point data
*/
std::vector<ElementType*> points_;
/**
* Pointer to dataset memory if allocated by this index, otherwise NULL
*/
ElementType* data_ptr_;
};
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
omp_reduction_bug_fixed.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
float dotprod(float * a, float * b, size_t N)
{
size_t i;
float sum = 0;
#pragma omp parallel for \
shared(a, b, N) \
reduction(+: sum)
for (i = 0; i < N; ++i)
{
sum += a[i] * b[i];
/* tid is declared inside the loop so each thread gets its own copy;
a single shared tid written by every thread would be a data race */
int tid = omp_get_thread_num();
printf("tid = %d i = %zu\n", tid, i);
}
return sum;
}
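/* Annotation (sketch, not in the original file): reduction(+: sum) is
   roughly equivalent to the hand-written pattern below -- each thread
   accumulates into a private partial sum that is combined once at the end:

       float partial = 0;
       #pragma omp parallel firstprivate(partial)
       {
           #pragma omp for
           for (size_t k = 0; k < N; ++k) partial += a[k] * b[k];
           #pragma omp atomic
           sum += partial;
       }
*/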
int main (int argc, char *argv[])
{
const size_t N = 100;
size_t i;
float sum, seq_sum;
float a[N], b[N];
seq_sum = 0;
for (i = 0; i < N; ++i)
{
a[i] = b[i] = (float)i;
seq_sum += a[i] * b[i];
}
sum = dotprod(a, b, N);
/* compare with a relative tolerance: the parallel reduction sums in a
different order than the sequential loop, so bit-exact agreement is not
expected for float accumulation */
assert(fabs(sum - seq_sum) <= 1e-5 * fabs(seq_sum));
printf("Sum = %f, seq_sum = %f\n", sum, seq_sum);
return 0;
}
|
part_func_co.c | /* Last changed Time-stamp: <2007-05-09 16:11:21 ivo> */
/*
partition function for RNA secondary structures
Ivo L Hofacker
Stephan Bernhart
Vienna RNA package
*/
/*
$Log: part_func_co.c,v $
Revision 1.10 2007/05/10 17:27:01 ivo
make sure the relative error eps is positive in newton iteration
Revision 1.9 2006/05/10 15:12:11 ivo
some compiler choked on double semicolon after declaration
Revision 1.8 2006/04/05 12:52:31 ivo
Fix performance bug (O(n^4) loop)
Revision 1.7 2006/01/19 11:30:04 ivo
compute_probabilities should only look at one dimer at a time
Revision 1.6 2006/01/18 12:55:40 ivo
major cleanup of berni code
fix bugs related to confusing which free energy is returned by co_pf_fold()
Revision 1.5 2006/01/16 11:32:25 ivo
small bug in multiloop pair probs
Revision 1.4 2006/01/05 18:13:40 ivo
update
Revision 1.3 2006/01/04 15:14:29 ivo
fix bug in concentration calculations
Revision 1.2 2004/12/23 12:14:41 berni
*** empty log message ***
Revision 1.1 2004/12/22 10:46:17 berni
Partition function Cofolding 0.9, Computation of concentrations.
Revision 1.16 2003/08/04 09:14:09 ivo
finish up stochastic backtracking
Revision 1.15 2002/03/19 16:51:12 ivo
more on stochastic backtracking (still incomplete)
Revision 1.13 2001/11/16 17:30:04 ivo
add stochastic backtracking (still incomplete)
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "PS_dot.h"
#include "params.h"
#include "loop_energies.h"
#include "part_func.h"
#include "part_func_co.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*@unused@*/
PRIVATE char rcsid[] UNUSED = "$Id: part_func_co.c,v 1.10 2007/05/10 17:27:01 ivo Exp $";
#define ISOLATED 256.0
#undef TURN
#define TURN 0
#define SAME_STRAND(I,J) (((I)>=cut_point)||((J)<cut_point))
/* #define SAME_STRAND(I,J) (((J)<cut_point)||((I)>=cut_point2)||(((I)>=cut_point)&&((J)<cut_point2)))
*/
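/* Example (annotation): with cut_point = 4 on a 6-nt concatenated
   sequence, positions 1..3 form strand A and 4..6 strand B, so for i<=j
   SAME_STRAND(2,3) and SAME_STRAND(4,6) are true while SAME_STRAND(3,4)
   and SAME_STRAND(3,5) are false -- exactly the pairs spanning the cut. */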
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
int mirnatog = 0;
double F_monomer[2] = {0,0}; /* free energies of the two monomers */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE FLT_OR_DBL *expMLbase=NULL;
PRIVATE FLT_OR_DBL *q=NULL, *qb=NULL, *qm=NULL, *qm1=NULL, *qqm=NULL, *qqm1=NULL, *qq=NULL, *qq1=NULL;
PRIVATE FLT_OR_DBL *prml=NULL, *prm_l=NULL, *prm_l1=NULL, *q1k=NULL, *qln=NULL, *probs=NULL;
PRIVATE FLT_OR_DBL *scale=NULL;
PRIVATE pf_paramT *pf_params = NULL;
PRIVATE char *ptype=NULL; /* precomputed array of pair types */
PRIVATE int *jindx=NULL;
PRIVATE int *my_iindx=NULL;
PRIVATE int init_length; /* length in last call to init_pf_fold() */
PRIVATE int do_bppm = 1; /* do backtracking per default */
PRIVATE short *S=NULL, *S1=NULL;
PRIVATE char *pstruc=NULL;
PRIVATE char *sequence=NULL;
PRIVATE double alpha = 1.0;
PRIVATE int struct_constrained = 0;
#ifdef _OPENMP
/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(expMLbase, q, qb, qm, qm1, qqm, qqm1, qq, qq1, prml, prm_l, prm_l1, q1k, qln,\
scale, pf_params, ptype, jindx, my_iindx, init_length, S, S1, pstruc, sequence, probs, do_bppm, alpha, struct_constrained)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void init_partfunc_co(int length, pf_paramT *parameters);
PRIVATE void pf_co(const char *sequence);
PRIVATE void pf_co_bppm(const char *sequence, char *structure);
PRIVATE double *Newton_Conc(double ZAB, double ZAA, double ZBB, double concA, double concB,double* ConcVec);
PRIVATE void scale_pf_params(unsigned int length, pf_paramT *parameters);
PRIVATE void get_arrays(unsigned int length);
PRIVATE void make_ptypes(const short *S, const char *structure);
PRIVATE void backtrack(int i, int j);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PRIVATE void init_partfunc_co(int length, pf_paramT *parameters){
if (length<1) nrerror("init_pf_fold: length must be greater 0");
#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
free_co_pf_arrays(); /* free previous allocation */
#else
if (init_length>0) free_co_pf_arrays(); /* free previous allocation */
#endif
#ifdef SUN4
nonstandard_arithmetic();
#else
#ifdef HP9
fpsetfastmode(1);
#endif
#endif
make_pair_matrix();
get_arrays((unsigned) length);
scale_pf_params((unsigned) length, parameters);
init_length = length;
}
PRIVATE void get_arrays(unsigned int length){
unsigned int size;
if((length +1) >= (unsigned int)sqrt((double)INT_MAX))
nrerror("get_arrays@part_func_co.c: sequence length exceeds addressable range");
size = sizeof(FLT_OR_DBL) * ((length+1)*(length+2)/2);
q = (FLT_OR_DBL *) space(size);
qb = (FLT_OR_DBL *) space(size);
qm = (FLT_OR_DBL *) space(size);
probs = (FLT_OR_DBL *) space(size);
qm1 = (FLT_OR_DBL *) space(size);
q1k = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+1));
qln = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
qq = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
qq1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
qqm = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
qqm1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
prm_l = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
prm_l1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
prml = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+2));
expMLbase = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+1));
scale = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL)*(length+1));
ptype = (char *) space(sizeof(char)*((length+1)*(length+2)/2));
my_iindx = get_iindx(length);
iindx = get_iindx(length); /* for backward compatibility and Perl wrapper */
jindx = get_indx(length);
}
PUBLIC void free_co_pf_arrays(void){
if(q) free(q);
if(qb) free(qb);
if(qm) free(qm);
if(qm1) free(qm1);
if(ptype) free(ptype);
if(qq) free(qq);
if(qq1) free(qq1);
if(qqm) free(qqm);
if(qqm1) free(qqm1);
if(q1k) free(q1k);
if(qln) free(qln);
if(prm_l) free(prm_l);
if(prm_l1) free(prm_l1);
if(prml) free(prml);
if(probs) free(probs);
if(expMLbase) free(expMLbase);
if(scale) free(scale);
if(my_iindx) free(my_iindx);
if(iindx) free(iindx); /* for backward compatibility and Perl wrapper */
if(jindx) free(jindx);
if(S) free(S);
if(S1) free(S1);
init_length=0;
q = qb = qm = qm1 = qq = qq1 = qqm = qqm1 = q1k = qln = prm_l = prm_l1 = prml = expMLbase = scale = probs = NULL;
ptype = NULL;
S = S1 = NULL;
my_iindx = jindx = iindx = NULL;
#ifdef SUN4
standard_arithmetic();
#else
#ifdef HP9
fpsetfastmode(0);
#endif
#endif
}
/*-----------------------------------------------------------------*/
PUBLIC cofoldF co_pf_fold(char *sequence, char *structure){
return co_pf_fold_par(sequence, structure, NULL, do_backtrack, fold_constrained);
}
PUBLIC cofoldF co_pf_fold_par(char *sequence,
char *structure,
pf_paramT *parameters,
int calculate_bppm,
int is_constrained){
int n;
FLT_OR_DBL Q;
cofoldF X;
double free_energy;
n = (int) strlen(sequence);
do_bppm = calculate_bppm;
struct_constrained = is_constrained;
#ifdef _OPENMP
/* always init everything since all global static variables are uninitialized when entering a thread */
init_partfunc_co(n, parameters);
#else
if(parameters) init_partfunc_co(n, parameters);
else if (n > init_length) init_partfunc_co(n, parameters);
else if (fabs(pf_params->temperature - temperature)>1e-6) update_co_pf_params_par(n, parameters);
#endif
/* printf("mirnatog=%d\n",mirnatog); */
if(S) free(S);
S = encode_sequence(sequence, 0);
if(S1) free(S1);
S1 = encode_sequence(sequence, 1);
make_ptypes(S, structure);
pf_co(sequence);
if (backtrack_type=='C') Q = qb[my_iindx[1]-n];
else if (backtrack_type=='M') Q = qm[my_iindx[1]-n];
else Q = q[my_iindx[1]-n];
/* ensemble free energy in Kcal/mol */
if (Q<=FLT_MIN) fprintf(stderr, "pf_scale too large\n");
free_energy = (-log(Q)-n*log(pf_params->pf_scale))*pf_params->kT/1000.0;
/* in case we abort because of floating point errors */
if (n>1600) fprintf(stderr, "free energy = %8.2f\n", free_energy);
/*probability of molecules being bound together*/
/*Computation of "real" Partition function*/
/*Need that for concentrations*/
if (cut_point>0){
double kT, pbound, QAB, QToT, Qzero;
kT = pf_params->kT/1000.0;
Qzero=q[my_iindx[1]-n];
QAB=(q[my_iindx[1]-n]-q[my_iindx[1]-(cut_point-1)]*q[my_iindx[cut_point]-n])*pf_params->expDuplexInit;
/*correction for symmetry*/
if((n-(cut_point-1)*2)==0) {
if ((strncmp(sequence, sequence+cut_point-1, cut_point-1))==0) {
QAB/=2;
}}
QToT=q[my_iindx[1]-(cut_point-1)]*q[my_iindx[cut_point]-n]+QAB;
pbound=1-(q[my_iindx[1]-(cut_point-1)]*q[my_iindx[cut_point]-n]/QToT);
X.FAB = -kT*(log(QToT)+n*log(pf_params->pf_scale));
X.F0AB = -kT*(log(Qzero)+n*log(pf_params->pf_scale));
X.FcAB = (QAB>1e-17) ? -kT*(log(QAB)+n*log(pf_params->pf_scale)) : 999;
X.FA = -kT*(log(q[my_iindx[1]-(cut_point-1)]) + (cut_point-1)*log(pf_params->pf_scale));
X.FB = -kT*(log(q[my_iindx[cut_point]-n]) + (n-cut_point+1)*log(pf_params->pf_scale));
/* printf("QAB=%.9f\tQtot=%.9f\n",QAB/scale[n],QToT/scale[n]);*/
}
else {
X.FA = X.FB = X.FAB = X.F0AB = free_energy;
X.FcAB = 0;
}
/* backtracking to construct binding probabilities of pairs*/
if(do_bppm){
pf_co_bppm(sequence, structure);
/*
* Backward compatibility:
* This block may be removed if deprecated functions
* relying on the global variable "pr" vanish from within the package!
*/
pr = probs;
/*
{
if(pr) free(pr);
pr = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) * ((n+1)*(n+2)/2));
memcpy(pr, probs, sizeof(FLT_OR_DBL) * ((n+1)*(n+2)/2));
}
*/
}
return X;
}
/* forward recursion of pf cofolding */
PRIVATE void pf_co(const char *sequence){
int n, i,j,k,l, ij, u,u1,ii, type, type_2, tt;
FLT_OR_DBL temp, Qmax=0;
FLT_OR_DBL qbt1, *tmp;
FLT_OR_DBL expMLclosing;
double max_real;
int noGUclosure = pf_params->model_details.noGUclosure;
max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX;
n = (int) strlen(sequence);
expMLclosing = pf_params->expMLclosing;
/* array initialization: qb, qm, q
qb, qm, q (i,j) are stored at index ((n+1-i)*(n-i))/2 + n+1-j */
/* for (d=0; d<=TURN; d++) */
for (i=1; i<=n/*-d*/; i++) {
ij = my_iindx[i]-i;
q[ij]=scale[1];
qb[ij]=qm[ij]=0.0;
}
for (i=0; i<=n; i++)
qq[i]=qq1[i]=qqm[i]=qqm1[i]=prm_l[i]=prm_l1[i]=prml[i]=0;
for (j=TURN+2;j<=n; j++) {
for (i=j-TURN-1; i>=1; i--) {
/* construction of partition function of segment i,j*/
/*firstly that given i bound to j : qb(i,j) */
u = j-i-1; ij = my_iindx[i]-j;
type = ptype[ij];
qbt1=0;
if (type!=0) {
/*hairpin contribution*/
if (SAME_STRAND(i,j)) {
if (((type==3)||(type==4))&&noGUclosure) qbt1 = 0;
else
qbt1 = exp_E_Hairpin(u, type, S1[i+1], S1[j-1], sequence+i-1, pf_params)*scale[u+2];
}
/* interior loops with interior pair k,l */
for (k=i+1; k<=MIN2(i+MAXLOOP+1,j-TURN-2); k++) {
u1 = k-i-1;
for (l=MAX2(k+TURN+1,j-1-MAXLOOP+u1); l<j; l++) {
if ((SAME_STRAND(i,k))&&(SAME_STRAND(l,j))){
type_2 = ptype[my_iindx[k]-l];
if (type_2) {
type_2 = rtype[type_2];
qbt1 += qb[my_iindx[k]-l] *
exp_E_IntLoop(u1, j-l-1, type, type_2,
S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params)*scale[u1+j-l+1];
}
}
}
}
/*multiple stem loop contribution*/
ii = my_iindx[i+1]; /* ii-k=[i+1,k-1] */
temp = 0.0;
if (SAME_STRAND(i,i+1) && SAME_STRAND(j-1,j)) {
for (k=i+2; k<=j-1; k++) {
if (SAME_STRAND(k-1,k))
temp += qm[ii-(k-1)]*qqm1[k];
}
tt = rtype[type];
temp*=exp_E_MLstem(tt, S1[j-1], S1[i+1], pf_params)*scale[2];
temp*=expMLclosing;
qbt1 += temp;
}
/*qc contribution*/
temp=0.0;
if (!SAME_STRAND(i,j)){
tt = rtype[type];
temp=q[my_iindx[i+1]-(cut_point-1)]*q[my_iindx[cut_point]-(j-1)];
if ((j==cut_point)&&(i==cut_point-1)) temp=scale[2];
else if (i==cut_point-1) temp=q[my_iindx[cut_point]-(j-1)]*scale[1];
else if (j==cut_point) temp=q[my_iindx[i+1]-(cut_point-1)]*scale[1];
if (j>cut_point) temp*=scale[1];
if (i<cut_point-1) temp*=scale[1];
temp *= exp_E_ExtLoop(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, pf_params);
qbt1+=temp;
}
qb[ij] = qbt1;
} /* end if (type!=0) */
else qb[ij] = 0.0;
/* construction of qqm matrix containing final stem
contributions to multiple loop partition function
from segment i,j */
if (SAME_STRAND(j-1,j)) {
qqm[i] = qqm1[i]*expMLbase[1];
}
else qqm[i]=0;
if (type&&SAME_STRAND(i-1,i)&&SAME_STRAND(j,j+1)) {
qbt1 = qb[ij];
qbt1 *= exp_E_MLstem(type, (i>1) ? S1[i-1] : -1, (j<n) ? S1[j+1] : -1, pf_params);
qqm[i] += qbt1;
}
if (qm1) qm1[jindx[j]+i] = qqm[i]; /* for stochastic backtracking */
/*construction of qm matrix containing multiple loop
partition function contributions from segment i,j */
temp = 0.0;
ii = my_iindx[i]; /* ii-k=[i,k] */
for (k=i+1; k<=j; k++) {
if (SAME_STRAND(k-1,k)) temp += (qm[ii-(k-1)])*qqm[k];
if (SAME_STRAND(i,k)) temp += expMLbase[k-i]*qqm[k];
}
qm[ij] = (temp + qqm[i]);
/*auxiliary matrix qq for cubic order q calculation below */
qbt1 = qb[ij];
if (type) {
qbt1 *= exp_E_ExtLoop(type, ((i>1)&&(SAME_STRAND(i-1,i))) ? S1[i-1] : -1, ((j<n)&&(SAME_STRAND(j,j+1))) ? S1[j+1] : -1, pf_params);
}
qq[i] = qq1[i]*scale[1] + qbt1;
/*construction of partition function for segment i,j */
temp = 1.0*scale[1+j-i] + qq[i];
for (k=i; k<=j-1; k++) temp += q[ii-k]*qq[k+1];
q[ij] = temp;
if (temp>Qmax) {
Qmax = temp;
if (Qmax>max_real/10.)
fprintf(stderr, "Q close to overflow: %d %d %g\n", i,j,temp);
}
if (temp>=max_real) {
char msg[128]; /* no need for a static buffer here */
snprintf(msg, 127, "overflow in co_pf_fold while calculating q[%d,%d]\n"
"use larger pf_scale", i,j);
nrerror(msg);
}
}
tmp = qq1; qq1 =qq; qq =tmp;
tmp = qqm1; qqm1=qqm; qqm=tmp;
}
}
/* backward recursion of pf cofolding */
PRIVATE void pf_co_bppm(const char *sequence, char *structure){
int n, i,j,k,l, ij, kl, ii, ll, type, type_2, tt, ov=0;
FLT_OR_DBL temp, Qmax=0, prm_MLb;
FLT_OR_DBL prmt,prmt1;
FLT_OR_DBL *tmp;
FLT_OR_DBL expMLclosing;
double max_real;
max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX;
n = (int) strlen(sequence);
expMLclosing = pf_params->expMLclosing;
/* backtracking to construct binding probabilities of pairs*/
if ((S != NULL) && (S1 != NULL)) {
FLT_OR_DBL *Qlout, *Qrout;
Qmax=0;
Qrout=(FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * (n+2));
Qlout=(FLT_OR_DBL *)space(sizeof(FLT_OR_DBL) * (cut_point+2));
for (k=1; k<=n; k++) {
q1k[k] = q[my_iindx[1] - k];
qln[k] = q[my_iindx[k] -n];
}
q1k[0] = 1.0;
qln[n+1] = 1.0;
/* pr = q; / * recycling */
/* 1. exterior pair i,j and initialization of pr array */
for (i=1; i<=n; i++) {
for (j=i; j<=MIN2(i+TURN,n); j++) probs[my_iindx[i]-j] = 0;
for (j=i+TURN+1; j<=n; j++) {
ij = my_iindx[i]-j;
type = ptype[ij];
if (type&&(qb[ij]>0.)) {
probs[ij] = q1k[i-1]*qln[j+1]/q1k[n];
probs[ij] *= exp_E_ExtLoop(type, ((i>1)&&(SAME_STRAND(i-1,i))) ? S1[i-1] : -1, ((j<n)&&(SAME_STRAND(j,j+1))) ? S1[j+1] : -1, pf_params);
} else
probs[ij] = 0;
}
}
for (l=n; l>TURN+1; l--) {
/* 2. bonding k,l as substem of 2:loop enclosed by i,j */
for (k=1; k<l-TURN; k++) {
kl = my_iindx[k]-l;
type_2 = ptype[kl]; type_2 = rtype[type_2];
if (qb[kl]==0) continue;
for (i=MAX2(1,k-MAXLOOP-1); i<=k-1; i++)
for (j=l+1; j<=MIN2(l+ MAXLOOP -k+i+2,n); j++) {
if ((SAME_STRAND(i,k))&&(SAME_STRAND(l,j))){
ij = my_iindx[i] - j;
type = ptype[ij];
if ((probs[ij]>0)) {
probs[kl] += probs[ij]*exp_E_IntLoop(k-i-1, j-l-1, type, type_2,
S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params)*scale[k-i+j-l];
}
}
}
}
/* 3. bonding k,l as substem of multi-loop enclosed by i,j */
prm_MLb = 0.;
if ((l<n)&&(SAME_STRAND(l,l+1)))
for (k=2; k<l-TURN; k++) {
i = k-1;
prmt = prmt1 = 0.0;
ii = my_iindx[i]; /* ii-j=[i,j] */
ll = my_iindx[l+1]; /* ll-j=[l+1,j] */
tt = ptype[ii-(l+1)]; tt=rtype[tt];
if (SAME_STRAND(i,k)){
prmt1 = probs[ii-(l+1)]*expMLclosing;
prmt1 *= exp_E_MLstem(tt, S1[l], S1[i+1], pf_params);
for (j=l+2; j<=n; j++) {
if (SAME_STRAND(j-1,j)){ /*??*/
tt = ptype[ii-j]; tt = rtype[tt];
prmt += probs[ii-j]*exp_E_MLstem(tt, S1[j-1], S1[i+1], pf_params)*qm[ll-(j-1)];
}
}
}
kl = my_iindx[k]-l;
tt = ptype[kl];
prmt *= expMLclosing;
prml[ i] = prmt;
prm_l[i] = prm_l1[i]*expMLbase[1]+prmt1;
prm_MLb = prm_MLb*expMLbase[1] + prml[i];
/* same as: prm_MLb = 0;
for (i=1; i<=k-1; i++) prm_MLb += prml[i]*expMLbase[k-i-1]; */
prml[i] = prml[ i] + prm_l[i];
if (qb[kl] == 0.) continue;
temp = prm_MLb;
for (i=1;i<=k-2; i++) {
if ((SAME_STRAND(i,i+1))&&(SAME_STRAND(k-1,k))){
temp += prml[i]*qm[my_iindx[i+1] - (k-1)];
}
}
temp *= exp_E_MLstem( tt,
((k>1)&&SAME_STRAND(k-1,k)) ? S1[k-1] : -1,
((l<n)&&SAME_STRAND(l,l+1)) ? S1[l+1] : -1,
pf_params) * scale[2];
probs[kl] += temp;
if (probs[kl]>Qmax) {
Qmax = probs[kl];
if (Qmax>max_real/10.)
fprintf(stderr, "P close to overflow: %d %d %g %g\n",
i, j, probs[kl], qb[kl]);
}
if (probs[kl]>=max_real) {
ov++;
probs[kl]=FLT_MAX;
}
} /* end for (k=..) multloop*/
else /* set prm_l to 0 to get prm_l1 to be 0 */
for (i=0; i<=n; i++) prm_l[i]=0;
tmp = prm_l1; prm_l1=prm_l; prm_l=tmp;
/*computation of .(..(...)..&..). type features?*/
if (cut_point<=0) continue; /* no .(..(...)..&..). type features*/
if ((l==n)||(l<=2)) continue; /* no .(..(...)..&..). type features*/
/*new version with O(n^3)??*/
if (l>cut_point) {
if (l<n) {
int t,kt;
for (t=n; t>l; t--) {
for (k=1; k<cut_point; k++) {
kt=my_iindx[k]-t;
type=rtype[ptype[kt]];
temp = probs[kt] * exp_E_ExtLoop(type, S1[t-1], (SAME_STRAND(k,k+1)) ? S1[k+1] : -1, pf_params) * scale[2];
if (l+1<t) temp*=q[my_iindx[l+1]-(t-1)];
if (SAME_STRAND(k,k+1)) temp*=q[my_iindx[k+1]-(cut_point-1)];
Qrout[l]+=temp;
}
}
}
for (k=l-1; k>=cut_point; k--) {
if (qb[my_iindx[k]-l]) {
kl=my_iindx[k]-l;
type=ptype[kl];
temp = Qrout[l];
temp *= exp_E_ExtLoop(type, (k>cut_point) ? S1[k-1] : -1, (l < n) ? S1[l+1] : -1, pf_params);
if (k>cut_point) temp*=q[my_iindx[cut_point]-(k-1)];
probs[kl]+=temp;
}
}
}
else if (l==cut_point ) {
int t, sk,s;
for (t=2; t<cut_point;t++) {
for (s=1; s<t; s++) {
for (k=cut_point; k<=n; k++) {
sk=my_iindx[s]-k;
if (qb[sk]) {
type=rtype[ptype[sk]];
temp=probs[sk]*exp_E_ExtLoop(type, (SAME_STRAND(k-1,k)) ? S1[k-1] : -1, S1[s+1], pf_params)*scale[2];
if (s+1<t) temp*=q[my_iindx[s+1]-(t-1)];
if (SAME_STRAND(k-1,k)) temp*=q[my_iindx[cut_point]-(k-1)];
Qlout[t]+=temp;
}
}
}
}
}
else if (l<cut_point) {
for (k=1; k<l; k++) {
if (qb[my_iindx[k]-l]) {
type=ptype[my_iindx[k]-l];
temp=Qlout[k];
temp *= exp_E_ExtLoop(type, (k>1) ? S1[k-1] : -1, (l<(cut_point-1)) ? S1[l+1] : -1, pf_params);
if (l+1<cut_point) temp*=q[my_iindx[l+1]-(cut_point-1)];
probs[my_iindx[k]-l]+=temp;
}
}
}
} /* end for (l=..) */
free(Qlout);
free(Qrout);
for (i=1; i<=n; i++)
for (j=i+TURN+1; j<=n; j++) {
ij = my_iindx[i]-j;
probs[ij] *= qb[ij];
}
if (structure!=NULL)
bppm_to_structure(structure, probs, n);
} /* end if (do_backtrack)*/
if (ov>0) fprintf(stderr, "%d overflows occurred while backtracking;\n"
"you might try a smaller pf_scale than %g\n",
ov, pf_params->pf_scale);
}
PRIVATE void scale_pf_params(unsigned int length, pf_paramT *parameters){
unsigned int i;
double kT, scaling_factor;
if(pf_params) free(pf_params);
if(parameters){
pf_params = get_boltzmann_factor_copy(parameters);
} else {
model_detailsT md;
set_model_details(&md);
pf_params = get_boltzmann_factors(temperature, alpha, md, pf_scale);
}
scaling_factor = pf_params->pf_scale;
kT = pf_params->kT; /* kT in cal/mol */
/* scaling factors (to avoid overflows) */
if (scaling_factor == -1) { /* mean energy for random sequences: 184.3*length cal */
scaling_factor = exp(-(-185+(pf_params->temperature-37.)*7.27)/kT);
if (scaling_factor<1) scaling_factor=1;
pf_params->pf_scale = scaling_factor;
}
scale[0] = 1.;
scale[1] = 1./scaling_factor;
expMLbase[0] = 1;
expMLbase[1] = pf_params->expMLbase/scaling_factor;
for (i=2; i<=length; i++) {
scale[i] = scale[i/2]*scale[i-(i/2)];
expMLbase[i] = pow(pf_params->expMLbase, (double)i) * scale[i];
}
}
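/* Annotation: the loop above fills scale[i] = pf_scale^(-i) by repeated
   doubling, scale[i] = scale[i/2] * scale[i - i/2], so any Boltzmann
   weight for a segment of u nucleotides can be multiplied by scale[u] to
   keep intermediate partition-function values in floating-point range;
   expMLbase[i] is the multiloop base penalty for i unpaired bases with
   the same scaling already applied. */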
/*----------------------------------------------------------------------*/
/*----------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
PUBLIC void update_co_pf_params(int length){
update_co_pf_params_par(length, NULL);
}
PUBLIC void update_co_pf_params_par(int length, pf_paramT *parameters){
make_pair_matrix();
scale_pf_params((unsigned) length, parameters);
}
/*---------------------------------------------------------------------------*/
PRIVATE void make_ptypes(const short *S, const char *structure) {
int n,i,j,k,l;
int noLP = pf_params->model_details.noLP;
n=S[0];
for (k=1; k<=n-TURN-1; k++)
for (l=1; l<=2; l++) {
int type,ntype=0,otype=0;
i=k; j = i+TURN+l;
if (j>n) continue;
type = pair[S[i]][S[j]];
while ((i>=1)&&(j<=n)) {
if ((i>1)&&(j<n)) ntype = pair[S[i-1]][S[j+1]];
if (noLP && (!otype) && (!ntype))
type = 0; /* i.j can only form isolated pairs */
qb[my_iindx[i]-j] = 0.;
ptype[my_iindx[i]-j] = (char) type;
otype = type;
type = ntype;
i--; j++;
}
}
if (struct_constrained&&(structure!=NULL)) {
constrain_ptypes(structure, (unsigned int)n, ptype, NULL, TURN, 1);
for(j=1; j<=n; j++) {
switch (structure[j-1]) {
case 'l': /*only intramolecular basepairing*/
if (j<cut_point) for (l=cut_point; l<=n; l++) ptype[my_iindx[j]-l] = 0;
else for (l=1; l<cut_point; l++) ptype[my_iindx[l]-j] =0;
break;
case 'e': /*only intermolecular bp*/
if (j<cut_point) {
for (l=1; l<j; l++) ptype[my_iindx[l]-j] =0;
for (l=j+1; l<cut_point; l++) ptype[my_iindx[j]-l] = 0;
}
else {
for (l=cut_point; l<j; l++) ptype[my_iindx[l]-j] =0;
for (l=j+1; l<=n; l++) ptype[my_iindx[j]-l] = 0;
}
break;
}
}
if(pf_params->model_details.canonicalBPonly)
for(i=1;i<n;i++)
for(j=i+1;j<=n;j++)
if(ptype[my_iindx[i]+j] == 7){
warn_user("removing non-canonical base pair from constraint");
ptype[my_iindx[i]+j] = 0;
}
}
if (mirnatog==1) { /*microRNA toggle: no intramolec. bp in 2. molec*/
for (j=cut_point; j<n; j++) {
for (l=j+1; l<=n; l++) {
ptype[my_iindx[j]-l] = 0;
}
}
}
}
/*
stochastic backtracking in pf_fold arrays
returns a random structure S with Boltzmann probability
p(S) = exp(-E(S)/kT)/Z
*/
PRIVATE void backtrack_qm1(int i,int j) {
/* i is paired to l, i<l<j; backtrack in qm1 to find l */
int ii, l, type;
double qt, r;
r = urn() * qm1[jindx[j]+i];
ii = my_iindx[i];
for (qt=0., l=i+TURN+1; l<=j; l++) {
type = ptype[ii-l];
if (type)
qt += qb[ii-l]*exp_E_MLstem(type, S1[i-1], S1[l+1], pf_params) * expMLbase[j-l];
if (qt>=r) break;
}
if (l>j) nrerror("backtrack failed in qm1");
backtrack(i,l);
}
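/* Annotation: backtrack_qm1() is roulette-wheel sampling -- a uniform
   deviate r in [0, qm1[i,j]) is drawn and the candidate pairing partners
   l are scanned while their Boltzmann weights accumulate in qt; the
   first l with qt >= r is picked, so each l is selected with probability
   weight(l)/qm1[i,j]. backtrack() below applies the same scheme to the
   hairpin / interior-loop / multiloop decomposition. */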
PRIVATE void backtrack(int i, int j) {
int noGUclosure = pf_params->model_details.noGUclosure;
do {
double r, qbt1;
int k, l, type, u, u1;
pstruc[i-1] = '('; pstruc[j-1] = ')';
r = urn() * qb[my_iindx[i]-j];
type = ptype[my_iindx[i]-j];
u = j-i-1;
/*hairpin contribution*/
if (((type==3)||(type==4))&&noGUclosure) qbt1 = 0;
else
qbt1 = exp_E_Hairpin(u, type, S1[i+1], S1[j-1], sequence+i-1, pf_params)*scale[u+2];
if (qbt1>r) return; /* found the hairpin we're done */
for (k=i+1; k<=MIN2(i+MAXLOOP+1,j-TURN-2); k++) {
u1 = k-i-1;
for (l=MAX2(k+TURN+1,j-1-MAXLOOP+u1); l<j; l++) {
int type_2;
type_2 = ptype[my_iindx[k]-l];
if (type_2) {
type_2 = rtype[type_2];
qbt1 += qb[my_iindx[k]-l] *
exp_E_IntLoop(u1, j-l-1, type, type_2,
S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params)*scale[u1+j-l+1];
}
if (qbt1 > r) break;
}
if (qbt1 > r) break;
}
if (l<j) {
i=k; j=l;
}
else break;
} while (1);
/* backtrack in multi-loop */
{
double r, qt;
int k, ii, jj;
i++; j--;
/* find the first split index */
ii = my_iindx[i]; /* ii-j=[i,j] */
jj = jindx[j]; /* jj+i=[j,i] */
for (qt=0., k=i+1; k<j; k++) qt += qm[ii-(k-1)]*qm1[jj+k];
r = urn() * qt;
for (qt=0., k=i+1; k<j; k++) {
qt += qm[ii-(k-1)]*qm1[jj+k];
if (qt>=r) break;
}
if (k>=j) nrerror("backtrack failed, can't find split index ");
backtrack_qm1(k, j);
j = k-1;
while (j>i) {
/* now backtrack [i ... j] in qm[] */
jj = jindx[j];
ii = my_iindx[i];
r = urn() * qm[ii - j];
qt = qm1[jj+i]; k=i;
if (qt<r)
for (k=i+1; k<=j; k++) {
qt += (qm[ii-(k-1)]+expMLbase[k-i])*qm1[jj+k];
if (qt >= r) break;
}
if (k>j) nrerror("backtrack failed in qm");
backtrack_qm1(k,j);
if (k<i+TURN) break; /* no more pairs */
r = urn() * (qm[ii-(k-1)] + expMLbase[k-i]);
if (expMLbase[k-i] >= r) break; /* no more pairs */
j = k-1;
}
}
}
PUBLIC void compute_probabilities(double FAB, double FA,double FB,
struct plist *prAB,
struct plist *prA, struct plist *prB,
int Alength) {
/*computes binding probabilities and dimer free energies*/
int i, j;
double pAB;
double mykT;
struct plist *lp1, *lp2;
int offset;
mykT=pf_params->kT/1000.;
/* pair probabilities in pr are relative to the null model (without DuplexInit) */
/*Compute probabilities pAB, pAA, pBB*/
pAB=1.-exp((1/mykT)*(FAB-FA-FB));
/* compute pair probabilities given that it is a dimer */
/* AB dimer */
offset=0;
lp2=prA;
if (pAB>0)
for (lp1=prAB; lp1->j>0; lp1++) {
float pp=0;
i=lp1->i; j=lp1->j;
while (offset+lp2->i < i && lp2->i>0) lp2++;
if (offset+lp2->i == i)
while ((offset+lp2->j) < j && (lp2->j>0)) lp2++;
if (lp2->j == 0) {lp2=prB; offset=Alength;}/* jump to next list */
if ((offset+lp2->i==i) && (offset+lp2->j ==j)) {
pp = lp2->p;
lp2++;
}
lp1->p=(lp1->p-(1-pAB)*pp)/pAB;
if(lp1->p < 0.){
warn_user("part_func_co: numeric instability detected, probability below zero!");
lp1->p = 0.;
}
}
return;
}
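/* Annotation: the pAB formula above follows directly from the partition
   functions: with Z = exp(-F/kT) for each ensemble, the unbound fraction
   of the AB ensemble is Z_A*Z_B/Z_AB, hence
   pAB = 1 - exp((FAB - FA - FB)/kT); each dimer pair probability is then
   obtained by subtracting the monomer contribution weighted by (1-pAB)
   and renormalizing by pAB. */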
PRIVATE double *Newton_Conc(double KAB, double KAA, double KBB, double concA, double concB,double* ConcVec) {
double TOL, EPS, xn, yn, det, cA, cB;
int i=0;
/*Newton iteration for computing concentrations*/
cA=concA;
cB=concB;
TOL=1e-6; /*Tolerance for convergence*/
ConcVec=(double*)space(5*sizeof(double)); /* holds concentrations */
do {
/* det = (4.0 * KAA * cA + KAB *cB + 1.0) * (4.0 * KBB * cB + KAB *cA + 1.0) - (KAB *cB) * (KAB *cA); */
det = 1 + 16. *KAA*KBB*cA*cB + KAB*(cA+cB) + 4.*KAA*cA + 4.*KBB*cB + 4.*KAB*(KBB*cB*cB + KAA*cA*cA);
/* xn = ( (2.0 * KBB * cB*cB + KAB *cA *cB + cB - concB) * (KAB *cA) -
(2.0 * KAA * cA*cA + KAB *cA *cB + cA - concA) * (4.0 * KBB * cB + KAB *cA + 1.0) ) /det; */
xn = ( (2.0 * KBB * cB*cB + cB - concB) * (KAB *cA) - KAB*cA*cB*(4. * KBB*cB + 1.) -
(2.0 * KAA * cA*cA + cA - concA) * (4.0 * KBB * cB + KAB *cA + 1.0) ) /det;
/* yn = ( (2.0 * KAA * cA*cA + KAB *cA *cB + cA - concA) * (KAB *cB) -
(2.0 * KBB * cB*cB + KAB *cA *cB + cB - concB) * (4.0 * KAA * cA + KAB *cB + 1.0) ) /det; */
yn = ( (2.0 * KAA * cA*cA + cA - concA) * (KAB *cB) - KAB*cA*cB*(4. * KAA*cA + 1.) -
(2.0 * KBB * cB*cB + cB - concB) * (4.0 * KAA * cA + KAB *cB + 1.0) ) /det;
EPS = fabs(xn/cA) + fabs(yn/cB);
cA += xn;
cB += yn;
i++;
if (i>10000) {
fprintf(stderr, "Newton did not converge after %d steps!!\n",i);
break;
}
} while(EPS>TOL);
ConcVec[0]= cA*cB*KAB ;/*AB concentration*/
ConcVec[1]= cA*cA*KAA ;/*AA concentration*/
ConcVec[2]= cB*cB*KBB ;/*BB concentration*/
ConcVec[3]= cA; /* A concentration*/
ConcVec[4]= cB; /* B concentration*/
return ConcVec;
}
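/* Annotation: Newton_Conc() solves the two mass-balance equations
       concA = cA + 2*KAA*cA^2 + KAB*cA*cB
       concB = cB + 2*KBB*cB^2 + KAB*cA*cB
   for the free monomer concentrations cA and cB; det is the determinant
   of the Jacobian (cf. the commented-out expressions above) and (xn, yn)
   is the Newton step. The dimer concentrations then follow from the
   equilibrium constants, e.g. [AB] = KAB*cA*cB as stored in ConcVec[0]. */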
PUBLIC struct ConcEnt *get_concentrations(double FcAB, double FcAA, double FcBB, double FEA, double FEB, double *startconc)
{
/*takes an array of start concentrations, computes equilibrium concentrations of dimers and monomers, and returns an array of concentrations in structure ConcEnt*/
double *ConcVec;
int i;
struct ConcEnt *Concentration;
double KAA, KAB, KBB, kT;
kT=pf_params->kT/1000.;
Concentration=(struct ConcEnt *)space(20*sizeof(struct ConcEnt));
/* Compute equilibrium constants */
/* again note the input free energies are not from the null model (without DuplexInit) */
KAA = exp(( 2.0 * FEA - FcAA)/kT);
KBB = exp(( 2.0 * FEB - FcBB)/kT);
KAB = exp(( FEA + FEB - FcAB)/kT);
/* printf("Kaa..%g %g %g\n", KAA, KBB, KAB); */
for (i=0; ((startconc[i]!=0)||(startconc[i+1]!=0));i+=2) {
ConcVec=Newton_Conc(KAB, KAA, KBB, startconc[i], startconc[i+1], ConcVec);
Concentration[i/2].A0=startconc[i];
Concentration[i/2].B0=startconc[i+1];
Concentration[i/2].ABc=ConcVec[0];
Concentration[i/2].AAc=ConcVec[1];
Concentration[i/2].BBc=ConcVec[2];
Concentration[i/2].Ac=ConcVec[3];
Concentration[i/2].Bc=ConcVec[4];
if (!(((i+2)/2)%20)) {
Concentration=(struct ConcEnt *)xrealloc(Concentration,((i+2)/2+20)*sizeof(struct ConcEnt));
}
free(ConcVec);
}
return Concentration;
}
PUBLIC FLT_OR_DBL *export_co_bppm(void){
return probs;
}
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
PUBLIC struct plist *get_plist(struct plist *pl, int length, double cut_off) {
int i, j,n, count;
/*get pair probabilities out of the pr array*/
count=0;
n=2;
for (i=1; i<length; i++) {
for (j=i+1; j<=length; j++) {
if (pr[my_iindx[i]-j]<cut_off) continue;
if (count==n*length-1) {
n*=2;
pl=(struct plist *)xrealloc(pl,n*length*sizeof(struct plist));
}
pl[count].i=i;
pl[count].j=j;
pl[count++].p=pr[my_iindx[i]-j];
/* printf("gpl: %2d %2d %.9f\n",i,j,pr[my_iindx[i]-j]);*/
}
}
pl[count].i=0;
pl[count].j=0; /*->??*/
pl[count++].p=0.;
pl=(struct plist *)xrealloc(pl,(count)*sizeof(struct plist));
return pl;
}
PUBLIC void init_co_pf_fold(int length){ /* DO NOTHING */ }
|
GB_binop__div_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8)
// A*D function (colscale): GB (_AxD__div_int8)
// D*A function (rowscale): GB (_DxB__div_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8)
// C=scalar+B GB (_bind1st__div_int8)
// C=scalar+B' GB (_bind1st_tran__div_int8)
// C=A+scalar GB (_bind2nd__div_int8)
// C=A'+scalar GB (_bind2nd_tran__div_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 8) ;
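// Annotation (assumption, not defined in this file): GB_IDIV_SIGNED is
// GraphBLAS's safe signed integer division; to the best of my knowledge
// it guards the two cases that are undefined in C -- division by zero
// and INT8_MIN / -1 -- returning well-defined saturated results instead
// of trapping. See the macro's definition in the main GB headers for the
// authoritative behavior.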
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8)
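// Annotation: when any of these GxB_NO_* symbols is enabled at compile
// time (for example via -DGxB_NO_DIV_INT8, assuming the usual
// GB_control.h conventions), GB_DISABLE becomes true and every kernel in
// this file returns GrB_NO_VALUE, causing the caller to fall back to the
// generic, non-type-specialized implementation.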
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
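//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): why flipxy matters
//------------------------------------------------------------------------------
// A minimal standalone model of the flip logic above, assuming nothing about
// GraphBLAS internals: for a non-commutative operator such as division,
// computing the "flipped" result means calling the same function with its
// arguments swapped. demo_div is a hypothetical stand-in for GB_IDIV_SIGNED.
#if 0
#include <stdio.h>
static int demo_div (int x, int y) { return (y == 0) ? 0 : (x / y) ; }
int main (void)
{
    int a = 8, b = 2 ;
    printf ("fmult(x,y): %d\n", demo_div (a, b)) ;  // 4, GB_FLIPPED 0
    printf ("fmult(y,x): %d\n", demo_div (b, a)) ;  // 0, GB_FLIPPED 1
    return (0) ;
}
#endif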
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (x, bij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
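//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): the bind1st loop
//------------------------------------------------------------------------------
// A hedged, minimal model of the loop above: Cx [p] = op (x, Bx [p]) for each
// entry present in B, where a non-NULL bitmap Bb marks which positions hold
// entries (this is what the GBB macro tests). safe_div_i8 is a hypothetical
// stand-in for GB_IDIV_SIGNED, which defines integer division by zero.
#if 0
#include <stdint.h>
#include <stddef.h>
static int8_t safe_div_i8 (int8_t x, int8_t y)
{ return (y == 0) ? 0 : (int8_t) (x / y) ; }
static void bind1st_demo (int8_t *Cx, int8_t x, const int8_t *Bx,
                          const int8_t *Bb, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   // no entry at position p
        Cx [p] = safe_div_i8 (x, Bx [p]) ;      // cij = x / bij
    }
}
#endif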
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (aij, y, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \
}
GrB_Info GB (_bind1st_tran__div_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}
GrB_Info GB (_bind2nd_tran__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vecadd.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* compile (e.g. with gcc): gcc -fopenmp vecadd.c -o vecadd */
#define MAX 1000
int main(void)
{
    int *A, *B, *C;
    /* allocate the three vectors dynamically */
    A = (int *)malloc(MAX * sizeof(int));
    B = (int *)malloc(MAX * sizeof(int));
    C = (int *)malloc(MAX * sizeof(int));
    if (A == NULL || B == NULL || C == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    for (int i = 0; i < MAX; i++)
    {
        A[i] = rand() % 100;
        B[i] = rand() % 100;
        C[i] = 0;
    }
    double start = omp_get_wtime();
    /* the loop index is implicitly private under default(none); note that
       the printf serializes output and dominates the measured time */
    #pragma omp parallel for default(none) shared(A, B, C)
    for (int i = 0; i < MAX; i++)
    {
        printf("thread %d is executing row %d\n", omp_get_thread_num(), i);
        C[i] = A[i] + B[i];
    }
    double end = omp_get_wtime();
    printf("Total time required is %f\n", end - start);
    printf("Done\n");
    free(A);
    free(B);
    free(C);
    return 0;
} |
GB_binop__pow_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int16)
// C=scalar+B GB (_bind1st__pow_int16)
// C=scalar+B' GB (_bind1st_tran__pow_int16)
// C=A+scalar GB (_bind2nd__pow_int16)
// C=A'+scalar GB (_bind2nd_tran__pow_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_pow_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pow_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__pow_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__pow_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_int16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_int16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
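//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): integer power
//------------------------------------------------------------------------------
// pow_i16_demo is a hypothetical stand-in for GB_pow_int16, shown here as
// exponentiation by squaring for nonnegative exponents with wrap-around on
// overflow; the real GraphBLAS definition also pins down negative exponents
// and overflow behaviour, which this sketch does not attempt to match.
#if 0
#include <stdint.h>
static int16_t pow_i16_demo (int16_t x, uint16_t y)
{
    int16_t z = 1 ;
    while (y > 0)
    {
        if (y & 1) z = (int16_t) (z * x) ;  // multiply in the current bit
        x = (int16_t) (x * x) ;             // square the base
        y >>= 1 ;
    }
    return (z) ;
}
#endif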
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nbnxn_kernel_simd_2xnn.c | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
/*
* Note: this file was generated by the Verlet kernel generator for
* kernel type 2xnn.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "typedefs.h"
#ifdef GMX_NBNXN_SIMD_2XNN
/* Include the full-width SIMD macros */
#include "gmx_simd_macros.h"
#include "gmx_simd_vec.h"
#if !(GMX_SIMD_WIDTH_HERE == 8 || GMX_SIMD_WIDTH_HERE == 16)
#error "unsupported SIMD width"
#endif
#define GMX_SIMD_J_UNROLL_SIZE 2
#include "nbnxn_kernel_simd_2xnn.h"
#include "../nbnxn_kernel_common.h"
#include "gmx_omp_nthreads.h"
#include "types/force_flags.h"
/*! \brief Kinds of electrostatic treatments in SIMD Verlet kernels
*/
enum {
coultRF, coultTAB, coultTAB_TWIN, coultEWALD, coultEWALD_TWIN, coultNR
};
/* Declare and define the kernel function pointer lookup tables. */
static p_nbk_func_ener p_nbk_ener[coultNR][ljcrNR] =
{
{
nbnxn_kernel_simd_2xnn_rf_comb_geom_ener,
nbnxn_kernel_simd_2xnn_rf_comb_lb_ener,
nbnxn_kernel_simd_2xnn_rf_comb_none_ener,
},
{
nbnxn_kernel_simd_2xnn_tab_comb_geom_ener,
nbnxn_kernel_simd_2xnn_tab_comb_lb_ener,
nbnxn_kernel_simd_2xnn_tab_comb_none_ener,
},
{
nbnxn_kernel_simd_2xnn_tab_twin_comb_geom_ener,
nbnxn_kernel_simd_2xnn_tab_twin_comb_lb_ener,
nbnxn_kernel_simd_2xnn_tab_twin_comb_none_ener,
},
{
nbnxn_kernel_simd_2xnn_ewald_comb_geom_ener,
nbnxn_kernel_simd_2xnn_ewald_comb_lb_ener,
nbnxn_kernel_simd_2xnn_ewald_comb_none_ener,
},
{
nbnxn_kernel_simd_2xnn_ewald_twin_comb_geom_ener,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_lb_ener,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_none_ener,
},
};
static p_nbk_func_ener p_nbk_energrp[coultNR][ljcrNR] =
{
{
nbnxn_kernel_simd_2xnn_rf_comb_geom_energrp,
nbnxn_kernel_simd_2xnn_rf_comb_lb_energrp,
nbnxn_kernel_simd_2xnn_rf_comb_none_energrp,
},
{
nbnxn_kernel_simd_2xnn_tab_comb_geom_energrp,
nbnxn_kernel_simd_2xnn_tab_comb_lb_energrp,
nbnxn_kernel_simd_2xnn_tab_comb_none_energrp,
},
{
nbnxn_kernel_simd_2xnn_tab_twin_comb_geom_energrp,
nbnxn_kernel_simd_2xnn_tab_twin_comb_lb_energrp,
nbnxn_kernel_simd_2xnn_tab_twin_comb_none_energrp,
},
{
nbnxn_kernel_simd_2xnn_ewald_comb_geom_energrp,
nbnxn_kernel_simd_2xnn_ewald_comb_lb_energrp,
nbnxn_kernel_simd_2xnn_ewald_comb_none_energrp,
},
{
nbnxn_kernel_simd_2xnn_ewald_twin_comb_geom_energrp,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_lb_energrp,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_none_energrp,
},
};
static p_nbk_func_noener p_nbk_noener[coultNR][ljcrNR] =
{
{
nbnxn_kernel_simd_2xnn_rf_comb_geom_noener,
nbnxn_kernel_simd_2xnn_rf_comb_lb_noener,
nbnxn_kernel_simd_2xnn_rf_comb_none_noener,
},
{
nbnxn_kernel_simd_2xnn_tab_comb_geom_noener,
nbnxn_kernel_simd_2xnn_tab_comb_lb_noener,
nbnxn_kernel_simd_2xnn_tab_comb_none_noener,
},
{
nbnxn_kernel_simd_2xnn_tab_twin_comb_geom_noener,
nbnxn_kernel_simd_2xnn_tab_twin_comb_lb_noener,
nbnxn_kernel_simd_2xnn_tab_twin_comb_none_noener,
},
{
nbnxn_kernel_simd_2xnn_ewald_comb_geom_noener,
nbnxn_kernel_simd_2xnn_ewald_comb_lb_noener,
nbnxn_kernel_simd_2xnn_ewald_comb_none_noener,
},
{
nbnxn_kernel_simd_2xnn_ewald_twin_comb_geom_noener,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_lb_noener,
nbnxn_kernel_simd_2xnn_ewald_twin_comb_none_noener,
},
};
static void
reduce_group_energies(int ng, int ng_2log,
const real *VSvdw, const real *VSc,
real *Vvdw, real *Vc)
{
const int unrollj = GMX_SIMD_WIDTH_HERE/GMX_SIMD_J_UNROLL_SIZE;
const int unrollj_half = unrollj/2;
int ng_p2, i, j, j0, j1, c, s;
ng_p2 = (1<<ng_2log);
/* The size of the x86 SIMD energy group buffer array is:
* ng*ng*ng_p2*unrollj_half*simd_width
*/
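/* Worked example (hypothetical sizes): with GMX_SIMD_WIDTH_HERE == 8 the
 * 2xnn kernel has unrollj = 4 and unrollj_half = 2, so each (i,j1,j0)
 * block below spans unrollj_half*unrollj = 8 reals; the two j0/j1 partial
 * sums of pass s sit at offsets c+0 and c+1, with c advancing by
 * unrollj+2 = 6 between passes.
 */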
for (i = 0; i < ng; i++)
{
for (j = 0; j < ng; j++)
{
Vvdw[i*ng+j] = 0;
Vc[i*ng+j] = 0;
}
for (j1 = 0; j1 < ng; j1++)
{
for (j0 = 0; j0 < ng; j0++)
{
c = ((i*ng + j1)*ng_p2 + j0)*unrollj_half*unrollj;
for (s = 0; s < unrollj_half; s++)
{
Vvdw[i*ng+j0] += VSvdw[c+0];
Vvdw[i*ng+j1] += VSvdw[c+1];
Vc [i*ng+j0] += VSc [c+0];
Vc [i*ng+j1] += VSc [c+1];
c += unrollj + 2;
}
}
}
}
}
#else /* GMX_NBNXN_SIMD_2XNN */
#include "gmx_fatal.h"
#endif /* GMX_NBNXN_SIMD_2XNN */
void
nbnxn_kernel_simd_2xnn(nbnxn_pairlist_set_t *nbl_list,
const nbnxn_atomdata_t *nbat,
const interaction_const_t *ic,
int ewald_excl,
rvec *shift_vec,
int force_flags,
int clearF,
real *fshift,
real *Vc,
real *Vvdw)
#ifdef GMX_NBNXN_SIMD_2XNN
{
int nnbl;
nbnxn_pairlist_t **nbl;
int coult;
int nb;
nnbl = nbl_list->nnbl;
nbl = nbl_list->nbl;
if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
{
coult = coultRF;
}
else
{
if (ewald_excl == ewaldexclTable)
{
if (ic->rcoulomb == ic->rvdw)
{
coult = coultTAB;
}
else
{
coult = coultTAB_TWIN;
}
}
else
{
if (ic->rcoulomb == ic->rvdw)
{
coult = coultEWALD;
}
else
{
coult = coultEWALD_TWIN;
}
}
}
#pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded))
for (nb = 0; nb < nnbl; nb++)
{
nbnxn_atomdata_output_t *out;
real *fshift_p;
out = &nbat->out[nb];
if (clearF == enbvClearFYes)
{
clear_f(nbat, nb, out->f);
}
if ((force_flags & GMX_FORCE_VIRIAL) && nnbl == 1)
{
fshift_p = fshift;
}
else
{
fshift_p = out->fshift;
if (clearF == enbvClearFYes)
{
clear_fshift(fshift_p);
}
}
if (!(force_flags & GMX_FORCE_ENERGY))
{
/* Don't calculate energies */
p_nbk_noener[coult][nbat->comb_rule](nbl[nb], nbat,
ic,
shift_vec,
out->f,
fshift_p);
}
else if (out->nV == 1)
{
/* No energy groups */
out->Vvdw[0] = 0;
out->Vc[0] = 0;
p_nbk_ener[coult][nbat->comb_rule](nbl[nb], nbat,
ic,
shift_vec,
out->f,
fshift_p,
out->Vvdw,
out->Vc);
}
else
{
/* Calculate energy group contributions */
int i;
for (i = 0; i < out->nVS; i++)
{
out->VSvdw[i] = 0;
}
for (i = 0; i < out->nVS; i++)
{
out->VSc[i] = 0;
}
p_nbk_energrp[coult][nbat->comb_rule](nbl[nb], nbat,
ic,
shift_vec,
out->f,
fshift_p,
out->VSvdw,
out->VSc);
reduce_group_energies(nbat->nenergrp, nbat->neg_2log,
out->VSvdw, out->VSc,
out->Vvdw, out->Vc);
}
}
if (force_flags & GMX_FORCE_ENERGY)
{
reduce_energies_over_lists(nbat, nnbl, Vvdw, Vc);
}
}
#else
{
gmx_incons("nbnxn_kernel_simd_2xnn called when such kernels "
" are not enabled.");
}
#endif
#undef GMX_SIMD_J_UNROLL_SIZE
|
GB_unop__identity_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint64)
// op(A') function: GB (_unop_tran__identity_int32_uint64)
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int32_uint64)
(
int32_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
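//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): the uint64->int32 cast
//------------------------------------------------------------------------------
// A minimal standalone model of the typecast this operator performs, assuming
// the common wrap-around behaviour (strictly, out-of-range conversions to a
// signed type are implementation-defined in C):
#if 0
#include <stdint.h>
#include <stdio.h>
int main (void)
{
    uint64_t Ax [3] = { 7, 4294967296ULL, 18446744073709551615ULL } ;
    int32_t Cx [3] ;
    for (int p = 0 ; p < 3 ; p++) Cx [p] = (int32_t) Ax [p] ;
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;  // typically: 7 0 -1
    return (0) ;
}
#endif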
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int32_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint8)
// A*D function (colscale): GB (_AxD__rdiv_uint8)
// D*A function (rowscale): GB (_DxB__rdiv_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint8)
// C=scalar+B GB (_bind1st__rdiv_uint8)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint8)
// C=A+scalar GB (_bind2nd__rdiv_uint8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
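//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): rdiv argument order
//------------------------------------------------------------------------------
// rdiv is div with its operands reversed, so with scalar y bound 2nd the loop
// above computes Cx [p] = y / Ax [p]. idiv_u8_demo is a hypothetical stand-in
// for GB_IDIV_UNSIGNED; the divide-by-zero convention shown is an assumption.
#if 0
#include <stdint.h>
#include <stdio.h>
static uint8_t idiv_u8_demo (uint8_t x, uint8_t y)
{ return (y == 0) ? UINT8_MAX : (uint8_t) (x / y) ; }
int main (void)
{
    uint8_t Ax [3] = { 2, 5, 0 }, Cx [3], y = 100 ;
    for (int p = 0 ; p < 3 ; p++) Cx [p] = idiv_u8_demo (y, Ax [p]) ;
    printf ("%u %u %u\n", Cx [0], Cx [1], Cx [2]) ;  // 50 20 255
    return (0) ;
}
#endif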
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__plus_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64)
// A*D function (colscale): GB (_AxD__plus_fc64)
// D*A function (rowscale): GB (_DxB__plus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64)
// C=scalar+B GB (_bind1st__plus_fc64)
// C=scalar+B' GB (_bind1st_tran__plus_fc64)
// C=A+scalar GB (_bind2nd__plus_fc64)
// C=A'+scalar GB (_bind2nd_tran__plus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_add (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
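// eWiseUnion differs from eWiseAdd only for unmatched entries: an entry
// present in A but not B is combined with beta_scalar, and one present
// in B but not A with alpha_scalar, instead of being copied through.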
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_add (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
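//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel): GB_FC64_add
//------------------------------------------------------------------------------
// GxB_FC64_t is GraphBLAS's double-precision complex type and GB_FC64_add is
// complex addition; a minimal standalone model with C99 <complex.h>:
#if 0
#include <complex.h>
#include <stdio.h>
int main (void)
{
    double complex x = 1.0 + 2.0*I, bij = 3.0 - 1.0*I ;
    double complex cij = x + bij ;                      // GB_FC64_add (x, bij)
    printf ("%g%+gi\n", creal (cij), cimag (cij)) ;     // 4+1i
    return (0) ;
}
#endif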
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_add (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale): GB (_AxD__rdiv_fc64)
// D*A function (rowscale): GB (_DxB__rdiv_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B GB (_bind1st__rdiv_fc64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar GB (_bind2nd__rdiv_fc64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_div (bij, aij)
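// Note: RDIV is division with its operands flipped, so cij = bij / aij.
// A minimal sketch of the two divide flavors, assuming C99 native complex
// arithmetic (the GB_FC64_div macro above also covers compilers without
// native complex support; the *_sketch names are illustrative only):
#include <complex.h>
static inline double complex div_fc64_sketch (double complex x, double complex y)
{
    return (x / y) ;    // z = div (x,y) = x/y
}
static inline double complex rdiv_fc64_sketch (double complex x, double complex y)
{
    return (y / x) ;    // z = rdiv (x,y) = y/x, the flipped divide
}
// so rdiv_fc64_sketch (x,y) == div_fc64_sketch (y,x) for all x and y.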
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
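// Note: when is_eWiseUnion is true above, an entry present in only one of A
// or B is combined with the caller-supplied alpha/beta scalar (f(aij,beta)
// or f(alpha,bij)) instead of being copied through unmodified; that is what
// distinguishes GxB_eWiseUnion from the conventional eWiseAdd.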
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#M> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_div (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_div (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
valid.res2.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_64_56_56_64_3_3.h"
#include "gen_ukr_A4B2gemm_1_64_56_56_64_3_3.h"
void testrun(float* A, float* B, float* C, float* oriB) {
int tid = omp_get_thread_num();
int Nx = 56;
int Ny = 56;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
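// The loop above repacks oriB into B for the microkernels: within each group
// of 16 filters, 8x8 blocks are transposed so that the filter index becomes
// the fastest-varying dimension (inferred from the transpose strides; the
// generated code carries no layout comment of its own).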
#pragma omp barrier // begin push button generated block
for(int xy5=0;xy5<3136+0;xy5+=3136)
{
for(int f5=0;f5<64+0;f5+=64)
{
for(int c5=0;c5<64+0;c5+=64)
{
for(int c4=c5;c4<min(64, 64+c5);c4+=32)
{
for(int f4=f5;f4<min(64, 64+f5);f4+=Tf2)
{
for(int xy4=xy5;xy4<min(3136, 3136+xy5);xy4+=3136)
{
for(int c3=c4;c3<min(64, 32+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(64, Tf2+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(3136, 3136+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(3136, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(64, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(3136, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(64, 16+f2);f1+=16)
{
int ctile=min(Tc1, 64-c1);
int x1=xy1/56;
int y1=xy1%56/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*215296+c1_1*3364+1*x1*58+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*9216+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*200704+of1_1*3136+x1*56+y1*1+of1_2*1;
if(56-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(56*56-xy1>=6){
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
2218.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
{
#pragma omp target teams distribute
for (i = 0; i < _PB_NY; i++)
{
y[i] = 0;
}
#pragma omp target teams distribute
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}
#pragma endscop
}
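/* Note on the kernel above: the second j loop accumulates into y[j] across
   iterations of the distributed i loop, so distinct teams may update the
   same y[j] concurrently. A race-free restructuring of that phase (a sketch
   only, not part of the benchmark; kernel_atax_y_sketch is a hypothetical
   name) swaps the loop order so each thread owns one y[j]: */
static
void kernel_atax_y_sketch(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx),
DATA_TYPE POLYBENCH_1D(y,NY,ny))
{
int i, j;
#pragma omp target teams distribute
for (j = 0; j < _PB_NY; j++)
{
y[j] = 0;
for (i = 0; i < _PB_NX; i++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}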
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
GB_unop__abs_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_int64_int64
// op(A') function: GB_unop_tran__abs_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64)
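// Note: like labs(), the integer absolute value above is well defined for
// every input except INT64_MIN, whose magnitude is not representable in
// int64_t; with the usual two's-complement wraparound it maps to itself.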
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_int64_int64
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
helper.c | #include "helper.h"
#include <omp.h>
#include <math.h>
#include <assert.h>
#include <stdio.h>  /* printf */
#include <stdlib.h> /* malloc */
#include <string.h> /* memcpy */
void createMatrixScheme1(double** D, double** E, int n) {
*D = malloc(n * sizeof(double));
*E = malloc((n-1) * sizeof(double));
double diagSpacing = (100.0-1.0) / (n-1);
int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
for (i = 0; i < n-1; ++i) {
(*E)[i] = -1; // off diagonal
(*D)[i] = 1.0 + i * diagSpacing;
}
(*D)[n-1] = 1.0 + (n-1) * diagSpacing; // one more diagonal element than off diagonal elements
}
void createMatrixScheme2(double **D, double **E, int n) {
*D = malloc(n * sizeof(double));
*E = malloc((n-1) * sizeof(double));
int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
for (i = 0; i < n-1; ++i) {
(*E)[i] = -1; // off diagonal
(*D)[i] = 2;
}
(*D)[n-1] = 2.0; // one more diagonal element than off diagonal elements
}
double* computeZ(double* Q1l, double* Q2f, int nq1, int nq2, double theta) {
double* z = malloc((nq1+nq2) * sizeof(double));
// copy last row of Q1 into z
memcpy(z, Q1l, nq1*sizeof(double));
// multiply first row of Q2 by theta^-1
int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
for(i = 0; i < nq2; ++i) {
z[nq1+i] = Q2f[i] / theta;
}
return z;
}
double* computeEigenvaluesOfScheme2(int n) {
double* L = malloc(n * sizeof(double));
int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
for (i = 0; i < n; ++i)
L[i] = 2 + 2 * cos((M_PI*(i+1))/(n+1));
return L;
}
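/* Note on the formula above: scheme 2 is the symmetric tridiagonal Toeplitz
   matrix with diagonal 2 and off-diagonals -1, whose classical eigenvalues
   are lambda_k = 2 - 2*cos(k*pi/(n+1)), k = 1..n. Because cos(k*pi/(n+1))
   sweeps (-1,1) symmetrically as k runs 1..n, the values computed above,
   2 + 2*cos(k*pi/(n+1)), form the same spectrum in descending order. */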
void printVector(double* vec, int n) {
int i = 0;
for (i = 0; i < n-1; ++i)
printf("%g, ", vec[i]);
printf("%g\n", vec[n-1]);
}
void printTridiagonalMatrix(double* D, double* E, int n) {
assert(n>0);
if (n == 1)
printf("%g\n", D[0]);
else if (n == 2) {
printf("%g\t%g\n", D[0], E[0]);
printf("%g\t%g\n", E[0], D[1]);
}
else {
int i = 0;
printf("0\t%g\t%g\n", D[0], E[0]);
for (i = 1; i < n-1; ++i)
printf("%g\t%g\t%g\n", E[i-1], D[i], E[i]);
printf("%g\t%g\t0\n", E[n-2], D[n-1]);
}
}
void printMatrix(double* M, int r, int c) {
int i;
for (i = 0; i < r; ++i) {
printVector(M+i*c,c);
}
}
int compareDiagElem( const void* a, const void* b)
{
DiagElem e1 = * ( (DiagElem*) a );
DiagElem e2 = * ( (DiagElem*) b );
if ( e1.e == e2.e ) return 0;
else if ( e1.e < e2.e ) return -1;
else return 1;
}
|
scale.h | /* Copyright (c) 2016 Drew Schmidt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __COOP_LIB_SCALE_H__
#define __COOP_LIB_SCALE_H__
#include <math.h>    /* sqrt */
#include <stdbool.h> /* bool */
static inline void centerscalevec(const int j, const int m, double *restrict x, double *restrict colmean, double *restrict colvar)
{
const double tmp = 1. / ((double) m-1);
const int mj = m*j;
*colmean = 0;
*colvar = 0;
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
{
double dt = x[i + mj] - *colmean;
*colmean += dt/((double) i+1);
*colvar += dt * (x[i + mj] - *colmean);
}
*colvar = sqrt(*colvar * tmp);
// Remove mean and variance
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
x[i + mj] = (x[i + mj] - *colmean) / *colvar;
}
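// The first loop above is Welford's one-pass update (dt = x - mean;
// mean += dt/(i+1); ss += dt*(x - mean)): on exit *colvar holds the sum of
// squared deviations, so multiplying by 1/(m-1) and taking sqrt yields the
// sample standard deviation. A standalone sketch for a single array
// (welford_sd_sketch is a hypothetical helper, not part of this API):
static inline double welford_sd_sketch(const int m, const double *restrict x)
{
  double mean = 0.;
  double ss = 0.;
  for (int i=0; i<m; i++)
  {
    double dt = x[i] - mean;
    mean += dt/((double) i+1);
    ss += dt * (x[i] - mean);
  }
  return sqrt(ss / ((double) m-1));
}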
static inline double centervec(const int j, const int m, double *x)
{
const double div = 1. / ((double) m);
const int mj = m*j;
double colmean = 0;
// Get column mean
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
colmean += x[i + mj] * div;
// Remove mean from column
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
x[i + mj] -= colmean;
return colmean;
}
static inline double scalevec(const int j, const int m, double *x)
{
const double div = 1./((double) m-1);
const int mj = m*j;
double colvar = 0;
// Get column variance
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
{
double tmp = x[i + mj];
colvar += tmp*tmp*div;
}
colvar = sqrt(colvar);
// Remove variance from column
SAFE_FOR_SIMD
for (int i=0; i<m; i++)
x[i + mj] /= colvar;
return colvar;
}
static inline int scale_nostore(const bool centerx, const bool scalex, const int m, const int n, double *restrict x)
{
if (m == 0 || n == 0)
return COOP_OK;
// Doing both at once, if needed, is more performant
if (centerx && scalex)
{
#pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
for (int j=0; j<n; j++)
{
// thread-private outputs: one pair shared across threads would be a data race
double colmean;
double colvar;
centerscalevec(j, m, x, &colmean, &colvar);
}
}
else if (centerx)
{
#pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
for (int j=0; j<n; j++)
centervec(j, m, x);
}
else if (scalex) // RMSE
{
#pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
for (int j=0; j<n; j++)
scalevec(j, m, x);
}
return COOP_OK;
}
#endif
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
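// Output channels are processed two at a time (nn_outch pairs) so each pass
// over the input rows feeds both kernels k0 and k1; leftover channels from
// remain_outch_start onward are handled one at a time further below.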
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p * inch * 9;
const float* k1 = kernel + (p + 1) * inch * 9;
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr0n = outptr0 + outw;
float* outptr1n = outptr1 + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0 + 3);
float32x4_t _k06 = vld1q_f32(k0 + 6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1 + 3);
float32x4_t _k16 = vld1q_f32(k1 + 6);
#endif // __ARM_NEON
int i = 0;
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n" // r0
"add %5, %5, #16 \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n" // r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n" // _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n" // _sum1
"fmla v6.4s, v8.4s, %18.s[0] \n"
"fmla v7.4s, v8.4s, %21.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v12.4s}, [%3] \n" // _sum0n
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v13.4s}, [%4] \n" // _sum1n
"fmla v12.4s, v14.4s, %20.s[0] \n"
"fmla v13.4s, v14.4s, %23.s[0] \n"
"ext v8.16b, v8.16b, v9.16b, #8 \n"
"ext v9.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v10.4s, %18.s[1] \n"
"fmla v7.4s, v10.4s, %21.s[1] \n"
"fmla v12.4s, v11.4s, %20.s[2] \n"
"fmla v13.4s, v11.4s, %23.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v14.4s, v15.4s}, [%6] \n" // r1
"add %6, %6, #16 \n"
"fmla v6.4s, v8.4s, %18.s[2] \n"
"fmla v7.4s, v8.4s, %21.s[2] \n"
"fmla v12.4s, v9.4s, %20.s[1] \n"
"fmla v13.4s, v9.4s, %23.s[1] \n"
"ext v10.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v14.4s, %19.s[0] \n"
"fmla v7.4s, v14.4s, %22.s[0] \n"
"fmla v12.4s, v14.4s, %18.s[0] \n"
"fmla v13.4s, v14.4s, %21.s[0] \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"fmla v6.4s, v10.4s, %19.s[1] \n"
"fmla v7.4s, v10.4s, %22.s[1] \n"
"fmla v12.4s, v10.4s, %18.s[1] \n"
"fmla v13.4s, v10.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v8.4s, v9.4s}, [%7] \n" // r2
"add %7, %7, #16 \n"
"fmla v6.4s, v11.4s, %19.s[2] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v12.4s, v11.4s, %18.s[2] \n"
"fmla v13.4s, v11.4s, %21.s[2] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"fmla v6.4s, v8.4s, %20.s[0] \n"
"fmla v7.4s, v8.4s, %23.s[0] \n"
"fmla v12.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v8.4s, %22.s[0] \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %20.s[1] \n"
"fmla v7.4s, v10.4s, %23.s[1] \n"
"fmla v12.4s, v10.4s, %19.s[1] \n"
"fmla v13.4s, v10.4s, %22.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n" // r0
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %20.s[2] \n"
"fmla v7.4s, v11.4s, %23.s[2] \n"
"fmla v12.4s, v11.4s, %19.s[2] \n"
"fmla v13.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n" // r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"st1 {v12.4s}, [%3], #16 \n"
"st1 {v13.4s}, [%4], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
"sub %8, %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n" // r0
"add %5, #16 \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n" // r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q14, q15, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1 :64] \n" // _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2 :64] \n" // _sum1
"vmla.f32 q6, q8, %e18[0] \n"
"vmla.f32 q7, q8, %e21[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3] \n" // _sum0n
"pld [%4, #128] \n"
"vld1.f32 {d26-d27}, [%4] \n" // _sum1n
"vmla.f32 q12, q14, %e20[0] \n"
"vmla.f32 q13, q14, %e23[0] \n"
"vext.32 q8, q8, q9, #2 \n"
"vext.32 q9, q14, q15, #1 \n"
"vmla.f32 q6, q10, %e18[1] \n"
"vmla.f32 q7, q10, %e21[1] \n"
"vmla.f32 q12, q11, %f20[0] \n"
"vmla.f32 q13, q11, %f23[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d28-d30}, [%6] \n" // r1
"add %6, #16 \n"
"vmla.f32 q6, q8, %f18[0] \n"
"vmla.f32 q7, q8, %f21[0] \n"
"vmla.f32 q12, q9, %e20[1] \n"
"vmla.f32 q13, q9, %e23[1] \n"
"vext.32 q10, q14, q15, #1 \n"
"vmla.f32 q6, q14, %e19[0] \n"
"vmla.f32 q7, q14, %e22[0] \n"
"vmla.f32 q12, q14, %e18[0] \n"
"vmla.f32 q13, q14, %e21[0] \n"
"vext.32 q11, q14, q15, #2 \n"
"vmla.f32 q6, q10, %e19[1] \n"
"vmla.f32 q7, q10, %e22[1] \n"
"vmla.f32 q12, q10, %e18[1] \n"
"vmla.f32 q13, q10, %e21[1] \n"
"pld [%7, #192] \n"
"vld1.f32 {d16-d18}, [%7 :64] \n" // r2
"add %7, #16 \n"
"vmla.f32 q6, q11, %f19[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q12, q11, %f18[0] \n"
"vmla.f32 q13, q11, %f21[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vmla.f32 q6, q8, %e20[0] \n"
"vmla.f32 q7, q8, %e23[0] \n"
"vmla.f32 q12, q8, %e19[0] \n"
"vmla.f32 q13, q8, %e22[0] \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e20[1] \n"
"vmla.f32 q7, q10, %e23[1] \n"
"vmla.f32 q12, q10, %e19[1] \n"
"vmla.f32 q13, q10, %e22[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n" // r0
"add %5, #16 \n"
"vmla.f32 q6, q11, %f20[0] \n"
"vmla.f32 q7, q11, %f23[0] \n"
"vmla.f32 q12, q11, %f19[0] \n"
"vmla.f32 q13, q11, %f22[0] \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n" // r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vst1.f32 {d12-d13}, [%1 : 64]!\n"
"vst1.f32 {d14-d15}, [%2 : 64]!\n"
"vext.32 q11, q14, q15, #2 \n"
"vst1.f32 {d24-d25}, [%3]! \n"
"vst1.f32 {d26-d27}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %5, #16 \n"
"sub %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
float32x4_t _sum0n = vmulq_f32(_r10, _k00);
float32x4_t _sum1n = vmulq_f32(_r10, _k10);
_sum0n = vmlaq_f32(_sum0n, _r20, _k03);
_sum1n = vmlaq_f32(_sum1n, _r20, _k13);
_sum0n = vmlaq_f32(_sum0n, _r30, _k06);
_sum1n = vmlaq_f32(_sum1n, _r30, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
_sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
_sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
*outptr0n = vaddvq_f32(_sum0n);
*outptr1n = vaddvq_f32(_sum1n);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
*outptr0n = vget_lane_f32(_ss01n, 0);
*outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum0n = 0.f;
float sum1 = 0.f;
float sum1n = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
sum0n += r1[0] * k0[0];
sum0n += r1[1] * k0[1];
sum0n += r1[2] * k0[2];
sum0n += r2[0] * k0[3];
sum0n += r2[1] * k0[4];
sum0n += r2[2] * k0[5];
sum0n += r3[0] * k0[6];
sum0n += r3[1] * k0[7];
sum0n += r3[2] * k0[8];
sum1n += r1[0] * k1[0];
sum1n += r1[1] * k1[1];
sum1n += r1[2] * k1[2];
sum1n += r2[0] * k1[3];
sum1n += r2[1] * k1[4];
sum1n += r2[2] * k1[5];
sum1n += r3[0] * k1[6];
sum1n += r3[1] * k1[7];
sum1n += r3[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr0n += sum0n;
*outptr1n += sum1n;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr0n++;
outptr1n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n" // r0
"add %3, %3, #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n" // _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n" // _sum1
"fmul v14.4s, v8.4s, %12.s[0] \n"
"fmul v15.4s, v8.4s, %15.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %12.s[1] \n"
"fmla v7.4s, v10.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n" // r1
"add %4, %4, #16 \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"fmla v15.4s, v11.4s, %15.s[2] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v14.4s, v10.4s, %13.s[1] \n"
"fmla v15.4s, v10.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n" // r2
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %13.s[2] \n"
"fmla v7.4s, v11.4s, %16.s[2] \n"
"fmla v14.4s, v8.4s, %14.s[0] \n"
"fmla v15.4s, v8.4s, %17.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %14.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[1] \n"
"fmla v14.4s, v11.4s, %14.s[2] \n"
"fmla v15.4s, v11.4s, %17.s[2] \n"
"fadd v6.4s, v6.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v15.4s \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n" // r0
"add %3, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n" // _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n" // _sum1
"vmul.f32 q14, q8, %e12[0] \n"
"vmul.f32 q15, q8, %e15[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e12[1] \n"
"vmla.f32 q7, q10, %e15[1] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n" // r1
"add %4, #16 \n"
"vmla.f32 q14, q11, %f12[0] \n"
"vmla.f32 q15, q11, %f15[0] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q14, q10, %e13[1] \n"
"vmla.f32 q15, q10, %e16[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5] \n" // r2
"add %5, #16 \n"
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"vmla.f32 q14, q8, %e14[0] \n"
"vmla.f32 q15, q8, %e17[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e14[1] \n"
"vmla.f32 q7, q10, %e17[1] \n"
"vmla.f32 q14, q11, %f14[0] \n"
"vmla.f32 q15, q11, %f17[0] \n"
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
outptr0++;
outptr1++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9;
k1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k3456 = vld1q_f32(kernel0 + 3);
float32x4_t _k6789 = vld1q_f32(kernel0 + 6);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n" // r0
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n" // _sum
"fmla v7.4s, v9.4s, %14.s[0] \n"
"fmul v6.4s, v11.4s, %14.s[1] \n"
"fmul v13.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n" // r1
"add %4, %4, #16 \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %15.s[1] \n"
"fmla v13.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n" // _sum2
"fmla v8.4s, v9.4s, %14.s[0] \n"
"fmul v14.4s, v11.4s, %14.s[1] \n"
"fmul v15.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v9.4s, v10.4s}, [%5] \n" // r2
"add %5, %5, #16 \n"
"fmla v7.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %16.s[1] \n"
"fmla v13.4s, v12.4s, %16.s[2] \n"
"fmla v8.4s, v9.4s, %15.s[0] \n"
"fmla v14.4s, v11.4s, %15.s[1] \n"
"fmla v15.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v9.4s, v10.4s}, [%6] \n" // r3
"add %6, %6, #16 \n"
"fmla v8.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v14.4s, v11.4s, %16.s[1] \n"
"fmla v15.4s, v12.4s, %16.s[2] \n"
"fadd v7.4s, v7.4s, v6.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n" // r0
"fadd v8.4s, v8.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v8.4s, v8.4s, v15.4s \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"add %3, %3, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %3, %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n" // r0
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1 :64] \n" // _sum
"vmla.f32 q7, q9, %e14[0] \n"
"vmul.f32 q6, q11, %e14[1] \n"
"vmul.f32 q13, q12, %f14[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d18-d20}, [%4] \n" // r1
"add %4, #16 \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e15[1] \n"
"vmla.f32 q13, q12, %f15[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n" // _sum2
"vmla.f32 q8, q9, %e14[0] \n"
"vmul.f32 q14, q11, %e14[1] \n"
"vmul.f32 q15, q12, %f14[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d18-d20}, [%5 :64] \n" // r2
"add %5, #16 \n"
"vmla.f32 q7, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e16[1] \n"
"vmla.f32 q13, q12, %f16[0] \n"
"vmla.f32 q8, q9, %e15[0] \n"
"vmla.f32 q14, q11, %e15[1] \n"
"vmla.f32 q15, q12, %f15[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d18-d20}, [%6] \n" // r3
"add %6, #16 \n"
"vmla.f32 q8, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q14, q11, %e16[1] \n"
"vmla.f32 q15, q12, %f16[0] \n"
"vadd.f32 q7, q7, q6 \n"
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n" // r0
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q8, q8, q15 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"add %3, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
_sum2 = vmlaq_f32(_sum2, _r20, _k3456);
_sum2 = vmlaq_f32(_sum2, _r30, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
_sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
*outptr2 = vaddvq_f32(_sum2);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
*outptr = vget_lane_f32(_sss2, 0);
*outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
#endif
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n" // r0
"add %2, %2, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n" // _sum
"fmla v7.4s, v8.4s, %10.s[0] \n"
"fmul v13.4s, v10.4s, %10.s[1] \n"
"fmul v14.4s, v11.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n" // r1
"add %3, %3, #16 \n"
"fmla v7.4s, v8.4s, %11.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %11.s[1] \n"
"fmla v14.4s, v11.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n" // r2
"add %4, %4, #16 \n"
"fmla v7.4s, v8.4s, %12.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %12.s[1] \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n" // r0
"add %2, %2, #16 \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"st1 {v7.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n" // r0
"add %2, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n" // _sum
"vmla.f32 q7, q8, %e10[0] \n"
"vmul.f32 q13, q10, %e10[1] \n"
"vmul.f32 q14, q11, %f10[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n" // r1
"add %3, #16 \n"
"vmla.f32 q7, q8, %e11[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e11[1] \n"
"vmla.f32 q14, q11, %f11[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n" // r2
"add %4, #16 \n"
"vmla.f32 q7, q8, %e12[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e12[1] \n"
"vmla.f32 q14, q11, %f12[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n" // r0
"add %2, #16 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q7, q7, q14 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
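// ktm above is the 8x3 kernel-transform matrix G of the Winograd F(6x6,3x3)
// scheme (hence "winograd64": 8x8 = 64 transformed taps per kernel); each
// 3x3 kernel k becomes the 8x8 tile G * k * G^T, computed in the two passes
// below (rows first, then columns).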
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// optimized layout for winograd4
// interleave weights
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
Mat kernel_tm2(8 * 8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
float* ktm2 = kernel_tm2.channel(pp);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
int q = 0;
#if __ARM_NEON && __aarch64__
for (; q + 3 < inch; q += 4)
{
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q + 1);
const float* k02 = kernel0_tm.row(q + 2);
const float* k03 = kernel0_tm.row(q + 3);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q + 1);
const float* k12 = kernel1_tm.row(q + 2);
const float* k13 = kernel1_tm.row(q + 3);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q + 1);
const float* k22 = kernel2_tm.row(q + 2);
const float* k23 = kernel2_tm.row(q + 3);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q + 1);
const float* k32 = kernel3_tm.row(q + 2);
const float* k33 = kernel3_tm.row(q + 3);
for (int r = 0; r < 16; r++)
{
// split into two asm blocks because gcc rejects asm statements with more than 30 operands :(
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"ld1 {v2.4s}, [%3], #16 \n"
"ld1 {v3.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"ld1 {v2.4s}, [%7], #16 \n"
"ld1 {v3.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k02), // %3
"=r"(k03), // %4
"=r"(k10), // %5
"=r"(k11), // %6
"=r"(k12), // %7
"=r"(k13) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k02),
"4"(k03),
"5"(k10),
"6"(k11),
"7"(k12),
"8"(k13)
: "cc", "memory", "v0", "v1", "v2", "v3");
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"ld1 {v2.4s}, [%3], #16 \n"
"ld1 {v3.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"ld1 {v2.4s}, [%7], #16 \n"
"ld1 {v3.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
: "=r"(ktm2), // %0
"=r"(k20), // %1
"=r"(k21), // %2
"=r"(k22), // %3
"=r"(k23), // %4
"=r"(k30), // %5
"=r"(k31), // %6
"=r"(k32), // %7
"=r"(k33) // %8
: "0"(ktm2),
"1"(k20),
"2"(k21),
"3"(k22),
"4"(k23),
"5"(k30),
"6"(k31),
"7"(k32),
"8"(k33)
: "cc", "memory", "v0", "v1", "v2", "v3");
}
}
#endif // __ARM_NEON && __aarch64__
for (; q + 1 < inch; q += 2)
{
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q + 1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q + 1);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q + 1);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q + 1);
for (int r = 0; r < 16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%3], #16 \n"
"ld1 {v1.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%7], #16 \n"
"ld1 {v1.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k10), // %3
"=r"(k11), // %4
"=r"(k20), // %5
"=r"(k21), // %6
"=r"(k30), // %7
"=r"(k31) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k10),
"4"(k11),
"5"(k20),
"6"(k21),
"7"(k30),
"8"(k31)
: "cc", "memory", "v0", "v1");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vld1.f32 {d2-d3}, [%2 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%3 :128]! \n"
"vld1.f32 {d2-d3}, [%4 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vld1.f32 {d2-d3}, [%6 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%7 :128]! \n"
"vld1.f32 {d2-d3}, [%8 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k10), // %3
"=r"(k11), // %4
"=r"(k20), // %5
"=r"(k21), // %6
"=r"(k30), // %7
"=r"(k31) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k10),
"4"(k11),
"5"(k20),
"6"(k21),
"7"(k30),
"8"(k31)
: "cc", "memory", "q0", "q1");
#endif // __aarch64__
#else
for (int m = 0; m < 4; m++)
{
ktm2[0 + m] = k00[m];
ktm2[4 + m] = k01[m];
ktm2[8 + m] = k10[m];
ktm2[12 + m] = k11[m];
ktm2[16 + m] = k20[m];
ktm2[20 + m] = k21[m];
ktm2[24 + m] = k30[m];
ktm2[28 + m] = k31[m];
}
k00 += 4;
k01 += 4;
k10 += 4;
k11 += 4;
k20 += 4;
k21 += 4;
k30 += 4;
k31 += 4;
ktm2 += 32;
#endif // __ARM_NEON
}
}
for (; q < inch; q++)
{
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
const float* k20 = kernel2_tm.row(q);
const float* k30 = kernel3_tm.row(q);
for (int r = 0; r < 16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%3], #16 \n"
"ld1 {v1.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k10), // %2
"=r"(k20), // %3
"=r"(k30) // %4
: "0"(ktm2),
"1"(k00),
"2"(k10),
"3"(k20),
"4"(k30)
: "cc", "memory", "v0", "v1");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vld1.f32 {d2-d3}, [%2 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%3 :128]! \n"
"vld1.f32 {d2-d3}, [%4 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k10), // %2
"=r"(k20), // %3
"=r"(k30) // %4
: "0"(ktm2),
"1"(k00),
"2"(k10),
"3"(k20),
"4"(k30)
: "cc", "memory", "q0", "q1");
#endif // __aarch64__
#else
for (int m = 0; m < 4; m++)
{
ktm2[0 + m] = k00[m];
ktm2[4 + m] = k10[m];
ktm2[8 + m] = k20[m];
ktm2[12 + m] = k30[m];
}
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
ktm2 += 16;
#endif // __ARM_NEON
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start);
const Mat kernel0_tm = kernel_tm.channel(p);
int q = 0;
for (; q < inch; q++)
{
const float* k00 = kernel0_tm.row(q);
for (int r = 0; r < 16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"st1 {v0.4s}, [%0], #16 \n"
: "=r"(ktm2), // %0
"=r"(k00) // %1
: "0"(ktm2),
"1"(k00)
: "cc", "memory", "v0");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d0-d1}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00) // %1
: "0"(ktm2),
"1"(k00)
: "cc", "memory", "q0");
#endif // __aarch64__
#else
for (int m = 0; m < 4; m++)
{
ktm2[m] = k00[m];
}
k00 += 4;
ktm2 += 4;
#endif // __ARM_NEON
}
}
}
kernel_tm = kernel_tm2;
}
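// For reference: a minimal scalar sketch of the Winograd F(6x6, 3x3) kernel
// transform that the function above vectorizes. The helper name
// winograd63_transform_kernel_ref and its raw float* interface are
// assumptions made for this sketch only; they are not part of the ncnn API.
// g is one 3x3 kernel (row-major), u receives the 8x8 transformed kernel.
static inline void winograd63_transform_kernel_ref(const float* g, float* u)
{
    static const float G[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };
    float Gg[8][3]; // Gg = G * g (8x3)
    for (int i = 0; i < 8; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            Gg[i][j] = G[i][0] * g[j] + G[i][1] * g[3 + j] + G[i][2] * g[6 + j];
        }
    }
    // u = Gg * G^T (8x8, row-major)
    for (int i = 0; i < 8; i++)
    {
        for (int j = 0; j < 8; j++)
        {
            u[i * 8 + j] = Gg[i][0] * G[j][0] + Gg[i][1] * G[j][1] + Gg[i][2] * G[j][2];
        }
    }
}
// Note the correspondence with the loops above: kernel_tm0 is written
// transposed, i.e. kernel_tm0[j * 8 + i] == u[i * 8 + j].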
static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
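// same Winograd F(6x6, 3x3) kernel transform as above; see the scalar
// reference sketch winograd63_transform_kernel_ref for the underlying math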
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// optimized layout for winograd5
// interleave weights
// Mat kernel_tm2(8*8, inch, outch);
// Mat kernel_tm2(inch, 64, outch);
#if __ARM_NEON && __aarch64__
Mat kernel_tm2(8 * 4 * (inch / 4) + 8 * (inch % 4), 64, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
Mat kernel_tm2(4 * 4 * (inch / 4) + 4 * (inch % 4), 64, outch / 4 + outch % 4);
#endif
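// kernel_tm2 packing for winograd5: one row per Winograd coefficient
// (64 rows); within a row the transformed weights of all input channels are
// interleaved 8-wide, 4-wide, or 1-wide (4-/1-wide on armv7) to match the
// output-channel groupings of the loops below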
int p = 0;
#if __aarch64__
for (; p + 7 < outch; p += 8)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
const Mat kernel4_tm = kernel_tm.channel(p + 4);
const Mat kernel5_tm = kernel_tm.channel(p + 5);
const Mat kernel6_tm = kernel_tm.channel(p + 6);
const Mat kernel7_tm = kernel_tm.channel(p + 7);
Mat ktm2 = kernel_tm2.channel(p / 8);
for (int r = 0; r < 64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q = 0; q < inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
const float* ktm4_0 = kernel4_tm.row(q);
const float* ktm5_0 = kernel5_tm.row(q);
const float* ktm6_0 = kernel6_tm.row(q);
const float* ktm7_0 = kernel7_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p[4] = ktm4_0[r];
ktm2p[5] = ktm5_0[r];
ktm2p[6] = ktm6_0[r];
ktm2p[7] = ktm7_0[r];
ktm2p += 8;
}
}
}
#endif // __aarch64__
for (; p + 3 < outch; p += 4)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
#if __ARM_NEON && __aarch64__
Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4);
#else
Mat ktm2 = kernel_tm2.channel(p / 4);
#endif
for (int r = 0; r < 64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q = 0; q < inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p += 4;
}
}
}
for (; p < outch; p++)
{
const Mat kernel0_tm = kernel_tm.channel(p);
#if __ARM_NEON && __aarch64__
Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
Mat ktm2 = kernel_tm2.channel(p / 4 + p % 4);
#endif
for (int r = 0; r < 64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q = 0; q < inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p += 1;
}
}
}
kernel_tm = kernel_tm2;
}
static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
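// Winograd F(6x6, 3x3) pipeline: pad the input, transform each 8x8 input
// patch (B^T d B), multiply-accumulate against the pre-transformed kernels
// across input channels in the transform domain, then fold each 8x8 sum
// back to a 6x6 output block (A^T m A)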
// pad to 6n+2
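// round outw/outh up to multiples of 6: every 6x6 output tile reads an
// 8x8 input patch, so neighbouring patches overlap by 2 pixels and the
// padded input is 6n+2 wide/high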
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, inch, 4u, opt.workspace_allocator);
const int tiles = w_tm / 8 * h_tm / 8;
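// bottom_blob_tm layout: 16 * tiles rows of 4 floats per input channel;
// each tile's 8x8 = 64 transformed values are split into 16 groups of 4,
// group k of tile t stored at row t + tiles * k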
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
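// these expressions are the rows of B^T (the commented itm matrix above);
// applying them along both dimensions computes the tile transform
// d' = B^T * d * B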
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff + 4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w * 2;
const float* r3 = r0 + w * 3;
// the assembly block for the armv7 input transform requires 13 general registers
// old gcc may fail to allocate registers on debug builds without -fomit-frame-pointer
// so, fall back to the intrinsic version for armv7 debug builds --- nihui
#if __aarch64__ || !defined(NDEBUG)
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0 + 4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1 + 4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2 + 4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3 + 4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w * 4;
r1 += w * 4;
r2 += w * 4;
r3 += w * 4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2);
float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3);
float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4);
float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5);
float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6);
float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7);
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0 + 4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1 + 4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2 + 4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3 + 4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3);
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3);
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3);
t0 += 8 * 4;
t1 += 8 * 4;
t2 += 8 * 4;
t3 += 8 * 4;
r0_tm0_0 += img0_tm.w * tiles * 2 * 4;
r0_tm0_4 += img0_tm.w * tiles * 2 * 4;
r0_tm1_0 += img0_tm.w * tiles * 2 * 4;
r0_tm1_4 += img0_tm.w * tiles * 2 * 4;
r0_tm2_0 += img0_tm.w * tiles * 2 * 4;
r0_tm2_4 += img0_tm.w * tiles * 2 * 4;
r0_tm3_0 += img0_tm.w * tiles * 2 * 4;
r0_tm3_4 += img0_tm.w * tiles * 2 * 4;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w * 4 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m]
"vmov q3, q7 \n" // use q7
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n" // tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m]
"vmov q3, q7 \n" // use q7
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n" // tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2);
float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3);
float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4);
float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5);
float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6);
float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7);
int step = img0_tm.w * tiles * 2 * 4 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n" // use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%2], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4], %26 \n"
"vst1.f32 {d17[1]}, [%6], %26 \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"sub %0, %0, #12 \n"
"sub %2, %2, #12 \n"
"sub %4, %4, #12 \n"
"sub %6, %6, #12 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1], %26 \n"
"vst1.f32 {d4-d5}, [%3], %26 \n"
"vst1.f32 {d6-d7}, [%5], %26 \n"
"vst1.f32 {d12-d13}, [%7], %26 \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n" // use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0] \n"
"vst1.f32 {d16[1]}, [%2] \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4] \n"
"vst1.f32 {d17[1]}, [%6] \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"vst1.f32 {d4-d5}, [%3] \n"
"vst1.f32 {d6-d7}, [%5] \n"
"vst1.f32 {d12-d13}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm0_4), // %1
"=r"(r0_tm1_0), // %2
"=r"(r0_tm1_4), // %3
"=r"(r0_tm2_0), // %4
"=r"(r0_tm2_4), // %5
"=r"(r0_tm3_0), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm0_4),
"2"(r0_tm1_0),
"3"(r0_tm1_4),
"4"(r0_tm2_0),
"5"(r0_tm2_4),
"6"(r0_tm3_0),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m = 0; m < 8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles);
for (int m = 0; m < 8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_0[1] = tmp12a + tmp12b;
r0_tm_0[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_0[3] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_4[1] = tmp56a + tmp56b;
r0_tm_4[2] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 2;
r0_tm_4 += img0_tm.w * tiles * 2;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, outch, 4u, opt.workspace_allocator);
const int tiles = h_tm / 8 * w_tm / 8;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
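// dot stage: element-wise multiply-accumulate in the transform domain,
// out_tm[p] += sum over input channels q of bottom_blob_tm[q] * U[p][q],
// tile by tile, four output channels at a time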
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const float* ktm = kernel_tm.channel(pp);
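// kernel_tm here is the interleaved kernel_tm2 produced by
// conv3x3s1_winograd64_transform_kernel_neon: channel pp holds the
// transformed weights of output channels p .. p+3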
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
#if __ARM_NEON && __aarch64__
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q + 1);
const float* r2 = bottom_blob_tm.channel(q + 2);
const float* r3 = bottom_blob_tm.channel(q + 3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
asm volatile(
"mov w0, #16 \n" // w0 = r = 16
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n" // v0 v1 v2 v3 = _k00 _k01 _k02 _k03
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n" // v4 v5 v6 v7 = _k10 _k11 _k12 _k13
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // v8 v9 v10 v11 = _k20 _k21 _k22 _k23
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // v12 v13 v14 v15 = _k30 _k31 _k32 _k33
// tile loop
"lsr w1, %w18, #2 \n" // w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n" //
"ld1 {v16.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"add x4, %0, #16 \n" // x4 = %0 next
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"add x5, %1, #16 \n" // x5 = %1 next
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"add x6, %2, #16 \n" // x6 = %2 next
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"add x7, %3, #16 \n" // x7 = %3 next
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"add %0, %0, #32 \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"st1 {v21.4s}, [%1] \n"
"add %1, %1, #32 \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"st1 {v22.4s}, [%2] \n"
"add %2, %2, #32 \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"st1 {v23.4s}, [%3] \n"
"add %3, %3, #32 \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"st1 {v24.4s}, [x4] \n"
"add x4, x4, #32 \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"st1 {v25.4s}, [x5] \n"
"add x5, x5, #32 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"st1 {v26.4s}, [x6] \n"
"add x6, x6, #32 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"st1 {v27.4s}, [x7] \n"
"add x7, x7, #32 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"st1 {v21.4s}, [%1] \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"st1 {v22.4s}, [%2] \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"st1 {v23.4s}, [%3] \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
"st1 {v24.4s}, [x4], #16 \n"
"mov %0, x4 \n"
"st1 {v25.4s}, [x5], #16 \n"
"mov %1, x5 \n"
"subs w1, w1, #1 \n"
"st1 {v26.4s}, [x6], #16 \n"
"mov %2, x6 \n"
"st1 {v27.4s}, [x7], #16 \n"
"mov %3, x7 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w18, #3 \n" // w1 = remain = tiles & 3
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
"st1 {v20.4s}, [%0], #16 \n"
"st1 {v21.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v22.4s}, [%2], #16 \n"
"st1 {v23.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(ktm) // %8
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(ktm),
"r"(tiles) // %18
: "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __ARM_NEON && __aarch64__
for (; q + 1 < inch; q += 2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q + 1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n" // w0 = r = 16
"0: \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4s, v1.4s}, [%6], #32 \n" // v0 v1 = _k00 _k01
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.4s, v3.4s}, [%6], #32 \n" // v2 v3 = _k10 _k11
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6], #32 \n" // v4 v5 = _k20 _k21
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v6.4s, v7.4s}, [%6], #32 \n" // v6 v7 = _k30 _k31
// tile loop
"lsr w1, %w14, #2 \n" // w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w14, #3 \n" // w1 = remain = tiles & 3
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21");
#else
asm volatile(
"mov r0, #16 \n" // r0 = r = 16
"0: \n"
"pld [%6, #256] \n"
"vld1.f32 {d0-d3}, [%6 :128]! \n" // q0 q1 = _k00 _k01
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6 :128]! \n" // q2 q3 = _k10 _k11
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]! \n" // q4 q5 = _k20 _k21
"pld [%6, #256] \n"
"vld1.f32 {d12-d15}, [%6 :128]! \n" // q6 q7 = _k30 _k31
// tile loop
"lsr r1, %14, #2 \n" // r1 = nn = tiles >> 2
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"1: \n"
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and r1, %14, #3 \n" // r1 = remain = tiles & 3
"cmp r1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13");
#endif // __aarch64__
#else
for (int r = 0; r < 16; r++)
{
for (int t = 0; t < tiles; t++)
{
for (int m = 0; m < 4; m++)
{
output0_tm[m] += r0[m] * ktm[0 + m];
output0_tm[m] += r1[m] * ktm[4 + m];
output1_tm[m] += r0[m] * ktm[8 + m];
output1_tm[m] += r1[m] * ktm[12 + m];
output2_tm[m] += r0[m] * ktm[16 + m];
output2_tm[m] += r1[m] * ktm[20 + m];
output3_tm[m] += r0[m] * ktm[24 + m];
output3_tm[m] += r1[m] * ktm[28 + m];
}
r0 += 4;
r1 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 32;
}
#endif // __ARM_NEON
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n" // w0 = r = 16
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n" // v0 v1 = _k00 _k10
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4s, v3.4s}, [%5], #32 \n" // v2 v3 = _k20 _k30
// tile loop
"mov w1, %w12 \n" // w1 = tiles
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v17.4s}, [%0] \n"
"fmla v17.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"
"fmla v18.4s, v16.4s, v1.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"
"fmla v19.4s, v16.4s, v2.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v20.4s}, [%3] \n"
"fmla v20.4s, v16.4s, v3.4s \n"
"st1 {v17.4s}, [%0], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v19.4s}, [%2], #16 \n"
"st1 {v20.4s}, [%3], #16 \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20");
#else
asm volatile(
"mov r0, #16 \n" // r0 = r = 16
"0: \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n" // q0 q1 = _k00 _k10
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n" // q2 q3 = _k20 _k30
// tile loop
"mov r1, %12 \n" // r1 = tiles
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm
"vmla.f32 q9, q12, q1 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm
"vmla.f32 q10, q12, q2 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm
"vmla.f32 q11, q12, q3 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13");
#endif // __aarch64__
#else
for (int r = 0; r < 16; r++)
{
for (int t = 0; t < tiles; t++)
{
for (int m = 0; m < 4; m++)
{
output0_tm[m] += r0[m] * ktm[0 + m];
output1_tm[m] += r0[m] * ktm[4 + m];
output2_tm[m] += r0[m] * ktm[8 + m];
output3_tm[m] += r0[m] * ktm[12 + m];
}
r0 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 16;
}
#endif // __ARM_NEON
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start);
out0_tm.fill(0.f);
int q = 0;
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
for (int r = 0; r < 16; r++)
{
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(ktm);
ktm += 4;
#endif // __ARM_NEON
// tile
for (int i = 0; i < tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v17.4s, %4.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "v16", "v17");
#else
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128]! \n" // q9 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm
"vmla.f32 q8, q9, %q4 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "q8", "q9");
#endif // __aarch64__
#else
for (int m = 0; m < 4; m++)
{
output0_tm[m] += r0[m] * ktm[m];
}
r0 += 4;
output0_tm += 4;
#endif // __ARM_NEON
}
#if !__ARM_NEON
ktm += 4;
#endif // !__ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
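// the six rows above are the inverse Winograd transform A^T of F(6x6, 3x3):
// each 8-wide transformed row or column collapses to 6 output values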
#if __ARM_NEON
const float coeff[4] = {4.f, 8.f, 16.f, 32.f};
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
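// each 8x8 coefficient tile yields one 6x6 output block, so tiles counts
// the 6x6 blocks per channel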
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
#if __ARM_NEON
const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j);
const float* output0_tm0_4 = out0_tm.row(i * w_tm / 8 + j + tiles);
const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 2);
const float* output0_tm1_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 3);
const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 4);
const float* output0_tm2_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 5);
const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 6);
const float* output0_tm3_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 7);
#if __aarch64__
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0);
float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4);
float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0);
float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4);
float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0);
float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4);
float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0);
float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4);
float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123);
float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567);
float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123);
float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567);
// no vswp intrinsic :(
float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1]));
float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1]));
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0_0 += out0_tm.w * tiles * 2 * 4;
output0_tm0_4 += out0_tm.w * tiles * 2 * 4;
output0_tm1_0 += out0_tm.w * tiles * 2 * 4;
output0_tm1_4 += out0_tm.w * tiles * 2 * 4;
output0_tm2_0 += out0_tm.w * tiles * 2 * 4;
output0_tm2_4 += out0_tm.w * tiles * 2 * 4;
output0_tm3_0 += out0_tm.w * tiles * 2 * 4;
output0_tm3_4 += out0_tm.w * tiles * 2 * 4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m = 0; m + 1 < 6; m += 2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0 + 4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1 + 4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8 * 2;
t1 += 8 * 2;
output0 += outw * 2;
output1 += outw * 2;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
int step = out0_tm.w * tiles * 2 * 4 * 4;
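// byte stride from one group of eight coefficient rows to the next
// (out0_tm.w * tiles * 2 * 4 floats, 4 bytes each)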
asm volatile(
// loop0
"vld1.f32 {d16-d17}, [%2], %21 \n"
"vld1.f32 {d18-d19}, [%3], %21 \n"
"vld1.f32 {d20-d21}, [%4], %21 \n"
"vld1.f32 {d22-d23}, [%5], %21 \n"
"vld1.f32 {d24-d25}, [%6], %21 \n"
"vld1.f32 {d26-d27}, [%7], %21 \n"
"vld1.f32 {d28-d29}, [%8], %21 \n"
"vld1.f32 {d30-d31}, [%9], %21 \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
"vld1.f32 {d16-d17}, [%2] \n"
"vld1.f32 {d18-d19}, [%3] \n"
"vld1.f32 {d20-d21}, [%4] \n"
"vld1.f32 {d22-d23}, [%5] \n"
"vld1.f32 {d24-d25}, [%6] \n"
"vld1.f32 {d26-d27}, [%7] \n"
"vld1.f32 {d28-d29}, [%8] \n"
"vld1.f32 {d30-d31}, [%9] \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm0_4), // %3
"=r"(output0_tm1_0), // %4
"=r"(output0_tm1_4), // %5
"=r"(output0_tm2_0), // %6
"=r"(output0_tm2_4), // %7
"=r"(output0_tm3_0), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm0_4),
"4"(output0_tm1_0),
"5"(output0_tm1_4),
"6"(output0_tm2_0),
"7"(output0_tm2_4),
"8"(output0_tm3_0),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw * 2 * 4;
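// byte stride advancing output0/output1 by two output rows (outw * 2 floats)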
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
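// scalar reference path: the same inverse transform written out per element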
const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j);
const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles);
for (int m = 0; m < 8; m++)
{
float tmp024a = output0_tm_0[1] + output0_tm_0[2];
float tmp135a = output0_tm_0[1] - output0_tm_0[2];
float tmp024b = output0_tm_0[3] + output0_tm_4[0];
float tmp135b = output0_tm_0[3] - output0_tm_4[0];
float tmp024c = output0_tm_4[1] + output0_tm_4[2];
float tmp135c = output0_tm_4[1] - output0_tm_4[2];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 2;
output0_tm_4 += out0_tm.w * tiles * 2;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m = 0; m < 6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
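// bordered input is (6n+2) x (6m+2): overlapping 8x8 input tiles read with
// stride 6 produce non-overlapping 6x6 output tiles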
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
bottom_blob_tm.create(1, 64 * tiles, inch, 4u, opt.workspace_allocator);
// bottom_blob_tm.create(inch, tiles, 64);
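// layout: w=1, h=64*tiles, c=inch; coefficient k of tile t lives at
// row k * tiles + t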
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
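// the eight expressions above are one pass of the input transform B^T d of
// F(6x6, 3x3); applying them along rows and then columns gives the full
// 8x8 transformed tile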
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff + 4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w * 2;
const float* r3 = r0 + w * 3;
#if __aarch64__
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0 + 4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1 + 4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2 + 4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3 + 4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w * 4;
r1 += w * 4;
r2 += w * 4;
r3 += w * 4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm / 8 + j + tiles * 8);
float* r0_tm2 = img0_tm.row(i * w_tm / 8 + j + tiles * 16);
float* r0_tm3 = img0_tm.row(i * w_tm / 8 + j + tiles * 24);
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0 + 4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1 + 4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2 + 4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3 + 4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
float32x4_t _t_2_a_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_a_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_a_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_a_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3);
r0_tm0 += img0_tm.w * tiles;
r0_tm1 += img0_tm.w * tiles;
r0_tm2 += img0_tm.w * tiles;
r0_tm3 += img0_tm.w * tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3);
t0 += 8 * 4;
t1 += 8 * 4;
t2 += 8 * 4;
t3 += 8 * 4;
r0_tm0 += img0_tm.w * tiles * 25;
r0_tm1 += img0_tm.w * tiles * 25;
r0_tm2 += img0_tm.w * tiles * 25;
r0_tm3 += img0_tm.w * tiles * 25;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
int stepw = w * 4 * 4;
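// byte stride advancing each input row pointer by four rows (w * 4 floats)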
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%4], %18 \n"
"vld1.f32 {d20-d23}, [%5], %18 \n"
"vld1.f32 {d24-d27}, [%6], %18 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%7], %18 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f17[1] \n"
"vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f17[0] \n"
"vmls.f32 q5, q14, %f17[0] \n"
"vst1.f32 {d4-d5}, [%0] \n" // tmp[0][m]
"add %0, %0, #128 \n"
"vmov q3, q7 \n" // use q7
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e16[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f16[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e16[0] \n"
"vmla.f32 q3, q11, %f16[1] \n"
"vst1.f32 {d16-d17}, [%1] \n" // tmp[1][m]
"add %1, %1, #128 \n"
"vmla.f32 q4, q6, %e17[1] \n"
"vmla.f32 q5, q11, %e16[1] \n"
"vst1.f32 {d18-d19}, [%2] \n" // tmp[2][m]
"add %2, %2, #128 \n"
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3] \n" // tmp[3][m]
"add %3, %3, #128 \n"
"vst1.f32 {d18-d19}, [%0] \n" // tmp[4][m]
"sub %0, %0, #112 \n"
"vmla.f32 q6, q7, %f17[1] \n"
"vst1.f32 {d4-d5}, [%1] \n" // tmp[5][m]
"sub %1, %1, #112 \n"
"vst1.f32 {d6-d7}, [%2] \n" // tmp[6][m]
"sub %2, %2, #112 \n"
"vst1.f32 {d12-d13}, [%3] \n" // tmp[7][m]
"sub %3, %3, #112 \n"
// loop1
"vld1.f32 {d16-d19}, [%4] \n"
"vld1.f32 {d20-d23}, [%5] \n"
"vld1.f32 {d24-d27}, [%6] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%7] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f17[1] \n"
"vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f17[0] \n"
"vmls.f32 q5, q14, %f17[0] \n"
"vst1.f32 {d4-d5}, [%0] \n" // tmp[0][m]
"add %0, %0, #128 \n"
"vmov q3, q7 \n" // use q7
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e16[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f16[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e16[0] \n"
"vmla.f32 q3, q11, %f16[1] \n"
"vst1.f32 {d16-d17}, [%1] \n" // tmp[1][m]
"add %1, %1, #128 \n"
"vmla.f32 q4, q6, %e17[1] \n"
"vmla.f32 q5, q11, %e16[1] \n"
"vst1.f32 {d18-d19}, [%2] \n" // tmp[2][m]
"add %2, %2, #128 \n"
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3] \n" // tmp[3][m]
"add %3, %3, #128 \n"
"vst1.f32 {d18-d19}, [%0] \n" // tmp[4][m]
"vmla.f32 q6, q7, %f17[1] \n"
"vst1.f32 {d4-d5}, [%1] \n" // tmp[5][m]
"vst1.f32 {d6-d7}, [%2] \n" // tmp[6][m]
"vst1.f32 {d12-d13}, [%3] \n" // tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3) // %7
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"w"(_coeff0), // %16
"w"(_coeff1), // %17
"r"(stepw) // %18
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 8);
float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 16);
float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 24);
int step = img0_tm.w * tiles * 4;
int step2 = img0_tm.w * tiles * 25 * 4;
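// step: byte stride between consecutive coefficient rows (img0_tm.w * tiles
// floats); step2 skips the remaining 25 rows so each pointer lands one
// 32-coefficient group further on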
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%4] \n"
"add %4, %4, #128 \n"
"vld1.f32 {d20-d23}, [%5] \n"
"add %5, %5, #128 \n"
"vld1.f32 {d24-d27}, [%6] \n"
"add %6, %6, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%7] \n"
"add %7, %7, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f17[1] \n"
"vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f17[0] \n"
"vmls.f32 q5, q14, %f17[0] \n"
"vst1.f32 {d4[0]}, [%0], %18 \n"
"vst1.f32 {d4[1]}, [%1], %18 \n"
"vmov q3, q7 \n" // use q7
"vst1.f32 {d5[0]}, [%2], %18 \n"
"vst1.f32 {d5[1]}, [%3], %18 \n"
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e16[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f16[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e16[0] \n"
"vmla.f32 q3, q11, %f16[1] \n"
"vst1.f32 {d16[0]}, [%0], %18 \n"
"vst1.f32 {d16[1]}, [%1], %18 \n"
"vmla.f32 q4, q6, %e17[1] \n"
"vst1.f32 {d17[0]}, [%2], %18 \n"
"vst1.f32 {d17[1]}, [%3], %18 \n"
"vmla.f32 q5, q11, %e16[1] \n"
"vst1.f32 {d18[0]}, [%0], %18 \n"
"vst1.f32 {d18[1]}, [%1], %18 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %18 \n"
"vst1.f32 {d19[1]}, [%3], %18 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %18 \n"
"vst1.f32 {d16[1]}, [%1], %18 \n"
"vst1.f32 {d17[0]}, [%2], %18 \n"
"vst1.f32 {d17[1]}, [%3], %18 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %18 \n"
"vst1.f32 {d18[1]}, [%1], %18 \n"
"vst1.f32 {d19[0]}, [%2], %18 \n"
"vst1.f32 {d19[1]}, [%3], %18 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %18 \n"
"vst1.f32 {d4[1]}, [%1], %18 \n"
"vst1.f32 {d5[0]}, [%2], %18 \n"
"vst1.f32 {d5[1]}, [%3], %18 \n"
"vmla.f32 q6, q7, %f17[1] \n"
"vst1.f32 {d6[0]}, [%0], %18 \n"
"vst1.f32 {d6[1]}, [%1], %18 \n"
"vst1.f32 {d7[0]}, [%2], %18 \n"
"vst1.f32 {d7[1]}, [%3], %18 \n"
"vst1.f32 {d12[0]}, [%0], %19 \n"
"vst1.f32 {d12[1]}, [%1], %19 \n"
"vst1.f32 {d13[0]}, [%2], %19 \n"
"vst1.f32 {d13[1]}, [%3], %19 \n"
// loop1
"vld1.f32 {d16-d19}, [%4] \n"
"vld1.f32 {d20-d23}, [%5] \n"
"vld1.f32 {d24-d27}, [%6] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%7] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f17[1] \n"
"vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c
"vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c
"vmls.f32 q4, q9, %f17[0] \n"
"vmls.f32 q5, q14, %f17[0] \n"
"vst1.f32 {d4[0]}, [%0], %18 \n"
"vst1.f32 {d4[1]}, [%1], %18 \n"
"vmov q3, q7 \n" // use q7
"vst1.f32 {d5[0]}, [%2], %18 \n"
"vst1.f32 {d5[1]}, [%3], %18 \n"
"vadd.f32 q2, q13, q6 \n" // use q6
"vmla.f32 q3, q10, %e16[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n" // use q7
"vadd.f32 q6, q12, q6 \n" // use q6
"vmla.f32 q5, q10, %f16[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e16[0] \n"
"vmla.f32 q3, q11, %f16[1] \n"
"vst1.f32 {d16[0]}, [%0], %18 \n"
"vst1.f32 {d16[1]}, [%1], %18 \n"
"vmla.f32 q4, q6, %e17[1] \n"
"vst1.f32 {d17[0]}, [%2], %18 \n"
"vst1.f32 {d17[1]}, [%3], %18 \n"
"vmla.f32 q5, q11, %e16[1] \n"
"vst1.f32 {d18[0]}, [%0], %18 \n"
"vst1.f32 {d18[1]}, [%1], %18 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %18 \n"
"vst1.f32 {d19[1]}, [%3], %18 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %18 \n"
"vst1.f32 {d16[1]}, [%1], %18 \n"
"vst1.f32 {d17[0]}, [%2], %18 \n"
"vst1.f32 {d17[1]}, [%3], %18 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %18 \n"
"vst1.f32 {d18[1]}, [%1], %18 \n"
"vst1.f32 {d19[0]}, [%2], %18 \n"
"vst1.f32 {d19[1]}, [%3], %18 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %18 \n"
"vst1.f32 {d4[1]}, [%1], %18 \n"
"vst1.f32 {d5[0]}, [%2], %18 \n"
"vst1.f32 {d5[1]}, [%3], %18 \n"
"vmla.f32 q6, q7, %f17[1] \n"
"vst1.f32 {d6[0]}, [%0], %18 \n"
"vst1.f32 {d6[1]}, [%1], %18 \n"
"vst1.f32 {d7[0]}, [%2], %18 \n"
"vst1.f32 {d7[1]}, [%3], %18 \n"
"vst1.f32 {d12[0]}, [%0] \n"
"vst1.f32 {d12[1]}, [%1] \n"
"vst1.f32 {d13[0]}, [%2] \n"
"vst1.f32 {d13[1]}, [%3] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm1_0), // %1
"=r"(r0_tm2_0), // %2
"=r"(r0_tm3_0), // %3
"=r"(t0), // %4
"=r"(t1), // %5
"=r"(t2), // %6
"=r"(t3) // %7
: "0"(r0_tm0_0),
"1"(r0_tm1_0),
"2"(r0_tm2_0),
"3"(r0_tm3_0),
"4"(t0),
"5"(t1),
"6"(t2),
"7"(t3),
"w"(_coeff0), // %16
"w"(_coeff1), // %17
"r"(step), // %18
"r"(step2) // %19
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
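// scalar reference path for the input transform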
const float* r0 = img0.row(i * 6) + j * 6;
for (int m = 0; m < 8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j);
float* r0_tm_1 = img0_tm.row(i * w_tm / 8 + j + tiles);
float* r0_tm_2 = img0_tm.row(i * w_tm / 8 + j + tiles * 2);
float* r0_tm_3 = img0_tm.row(i * w_tm / 8 + j + tiles * 3);
float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 4);
float* r0_tm_5 = img0_tm.row(i * w_tm / 8 + j + tiles * 5);
float* r0_tm_6 = img0_tm.row(i * w_tm / 8 + j + tiles * 6);
float* r0_tm_7 = img0_tm.row(i * w_tm / 8 + j + tiles * 7);
for (int m = 0; m < 8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_1[0] = tmp12a + tmp12b;
r0_tm_2[0] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_3[0] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_5[0] = tmp56a + tmp56b;
r0_tm_6[0] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 8;
r0_tm_1 += img0_tm.w * tiles * 8;
r0_tm_2 += img0_tm.w * tiles * 8;
r0_tm_3 += img0_tm.w * tiles * 8;
r0_tm_4 += img0_tm.w * tiles * 8;
r0_tm_5 += img0_tm.w * tiles * 8;
r0_tm_6 += img0_tm.w * tiles * 8;
r0_tm_7 += img0_tm.w * tiles * 8;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// permute
// bottom_blob_tm.create(1, 64 * tiles, inch);
// Mat bottom_blob_tm2(inch, tiles, 64);
Mat bottom_blob_tm2(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u, opt.workspace_allocator);
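// repack for the dot product: each channel r holds one of the 64 transform
// coefficients; rows pack tiles in blocks of 8, then 4, then singles, with
// all inch values of a block stored contiguously in the row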
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tm2p = tm2.row(i / 8);
const float* r0 = bottom_blob_tm;
r0 += r * tiles + i;
for (int q = 0; q < inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0 + 4);
vst1q_f32(tm2p, _r0);
vst1q_f32(tm2p + 4, _r0n);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
tm2p[4] = r0[4];
tm2p[5] = r0[5];
tm2p[6] = r0[6];
tm2p[7] = r0[7];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 8;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += r * tiles + i;
for (int q = 0; q < inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
vst1q_f32(tm2p, _r0);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 4;
}
}
for (; i < tiles; i++)
{
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* r0 = bottom_blob_tm;
r0 += r * tiles + i;
for (int q = 0; q < inch; q++)
{
tm2p[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tm2p += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(1, 64 * tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
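// aarch64: block output channels by 8 so the 16 accumulators fit in v16-v31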
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
const Mat kernel_tm0 = kernel_tm.channel(p / 8);
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
Mat out4_tm = top_blob_tm.channel(p + 4);
Mat out5_tm = top_blob_tm.channel(p + 5);
Mat out6_tm = top_blob_tm.channel(p + 6);
Mat out7_tm = top_blob_tm.channel(p + 7);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
float* output4_tm = out4_tm;
float* output5_tm = out5_tm;
float* output6_tm = out6_tm;
float* output7_tm = out7_tm;
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* bb2p0 = bb2.row(i / 8);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
// inch loop
"lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
// inch loop
"lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < tiles; i++)
{
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0123 = vdupq_n_f32(0.f);
float32x4_t _sum4567 = vdupq_n_f32(0.f);
int q = 0;
for (; q + 3 < inch; q += 4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1);
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm4 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm5 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm6 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm7 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3);
}
for (; q < inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123);
_sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567);
bb2p0 += 1;
ktm0 += 8;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
float sum4 = vgetq_lane_f32(_sum4567, 0);
float sum5 = vgetq_lane_f32(_sum4567, 1);
float sum6 = vgetq_lane_f32(_sum4567, 2);
float sum7 = vgetq_lane_f32(_sum4567, 3);
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output4_tm[0] = sum4;
output5_tm[0] = sum5;
output6_tm[0] = sum6;
output7_tm[0] = sum7;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
#endif // __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
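// process the remaining output channels in blocks of 4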
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p / 4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* bb2p0 = bb2.row(i / 8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
// inch loop
"lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"veor q14, q14, q14 \n"
"veor q15, q15, q15 \n"
// inch loop
"lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n" // r4 = remain = tiles & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
"vst1.f32 {d20-d23}, [%1]! \n"
"vst1.f32 {d24-d27}, [%2]! \n"
"vst1.f32 {d28-d31}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum0_4 = 0.f;
float sum0_5 = 0.f;
float sum0_6 = 0.f;
float sum0_7 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum1_4 = 0.f;
float sum1_5 = 0.f;
float sum1_6 = 0.f;
float sum1_7 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum2_4 = 0.f;
float sum2_5 = 0.f;
float sum2_6 = 0.f;
float sum2_7 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
float sum3_4 = 0.f;
float sum3_5 = 0.f;
float sum3_6 = 0.f;
float sum3_7 = 0.f;
for (int q = 0; q < inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum0_4 += bb2p0[4] * ktm0[0];
sum0_5 += bb2p0[5] * ktm0[0];
sum0_6 += bb2p0[6] * ktm0[0];
sum0_7 += bb2p0[7] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum1_4 += bb2p0[4] * ktm0[1];
sum1_5 += bb2p0[5] * ktm0[1];
sum1_6 += bb2p0[6] * ktm0[1];
sum1_7 += bb2p0[7] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum2_4 += bb2p0[4] * ktm0[2];
sum2_5 += bb2p0[5] * ktm0[2];
sum2_6 += bb2p0[6] * ktm0[2];
sum2_7 += bb2p0[7] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
sum3_4 += bb2p0[4] * ktm0[3];
sum3_5 += bb2p0[5] * ktm0[3];
sum3_6 += bb2p0[6] * ktm0[3];
sum3_7 += bb2p0[7] * ktm0[3];
bb2p0 += 8;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output0_tm[4] = sum0_4;
output0_tm[5] = sum0_5;
output0_tm[6] = sum0_6;
output0_tm[7] = sum0_7;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output1_tm[4] = sum1_4;
output1_tm[5] = sum1_5;
output1_tm[6] = sum1_6;
output1_tm[7] = sum1_7;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output2_tm[4] = sum2_4;
output2_tm[5] = sum2_5;
output2_tm[6] = sum2_6;
output2_tm[7] = sum2_7;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output3_tm[4] = sum3_4;
output3_tm[5] = sum3_5;
output3_tm[6] = sum3_6;
output3_tm[7] = sum3_7;
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
#endif // __ARM_NEON
}
for (; i + 3 < tiles; i += 4)
{
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
// inch loop
"lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
// inch loop
"lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n" // r4 = remain = tiles & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"vst1.f32 {d20-d21}, [%2]! \n"
"vst1.f32 {d22-d23}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
for (int q = 0; q < inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
bb2p0 += 4;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif // __ARM_NEON
}
for (; i < tiles; i++)
{
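// tiles were repacked into bottom_blob_tm2 in groups of 8, then 4, then
// singles, so the row index below is i/8 full 8-tile rows, plus (i%8)/4
// for the 4-tile row, plus i%4 for the preceding single-tile rows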
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
float32x4_t _sum0123 = vdupq_n_f32(0.f);
int q = 0;
for (; q + 3 < inch; q += 4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
#if __aarch64__
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3);
#else
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1);
#endif // __aarch64__
}
for (; q < inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0 = vld1q_f32(ktm0);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0);
bb2p0 += 1;
ktm0 += 4;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q = 0; q < inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[0] * ktm0[1];
sum2 += bb2p0[0] * ktm0[2];
sum3 += bb2p0[0] * ktm0[3];
bb2p0 += 1;
ktm0 += 4;
}
#endif // __ARM_NEON
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
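// whatever is left after the 4-wide pass is handled one output channel at a time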
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p / 4 + p % 4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
float* output0_tm = out0_tm;
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* bb2p0 = bb2.row(i / 8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
// inch loop
"lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
// inch loop
"lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n" // r4 = remain = tiles & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
for (int q = 0; q < inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
sum4 += bb2p0[4] * ktm0[0];
sum5 += bb2p0[5] * ktm0[0];
sum6 += bb2p0[6] * ktm0[0];
sum7 += bb2p0[7] * ktm0[0];
bb2p0 += 8;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm[4] = sum4;
output0_tm[5] = sum5;
output0_tm[6] = sum6;
output0_tm[7] = sum7;
output0_tm += 8;
#endif // __ARM_NEON
}
for (; i + 3 < tiles; i += 4)
{
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
// inch loop
"lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n" // w4 = remain = tiles & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #32] \n"
"ld1r {v0.4s}, [%5], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8");
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
// inch loop
"lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n" // r4 = remain = tiles & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4]! \n"
"pld [%5, #32] \n"
"vld1.f32 {d0[],d1[]}, [%5]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8");
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q = 0; q < inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
bb2p0 += 4;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm += 4;
#endif // __ARM_NEON
}
for (; i < tiles; i++)
{
const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* ktm0 = kernel_tm0.row(r);
int q = 0;
#if __ARM_NEON
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q + 3 < inch; q += 4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
float32x4_t _ktm0 = vld1q_f32(ktm0);
ktm0 += 4;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0);
#endif // __aarch64__
#else
float sum0 = 0.f;
#endif
for (; q < inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
bb2p0 += 1;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm += 1;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
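// a minimal scalar sketch of the 1-D transform above (reference only, kept
// commented out like the otm matrix), assuming r[0..7] is one strip of the
// 8x8 tile and out[0..5] the corresponding strip of the 6x6 result:
//
//     float a = r[1] + r[2], b = r[3] + r[4], c = r[5] + r[6]; // even taps
//     float x = r[1] - r[2], y = r[3] - r[4], z = r[5] - r[6]; // odd taps
//     out[0] = r[0] + a + b + c * 32;
//     out[1] = x + y * 2 + z * 16;
//     out[2] = a + b * 4 + c * 8;
//     out[3] = x + y * 8 + z * 4;
//     out[4] = a + b * 16 + c * 2;
//     out[5] = r[7] + x + y * 32 + z;
//
// applying it first to the eight rows and then to the six columns of each
// tile yields the 6x6 output block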
#if __ARM_NEON
const float coeff[4] = {4.f, 8.f, 16.f, 32.f};
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
#if __ARM_NEON
#if __aarch64__
const float* output0_tm0 = out0_tm.row(i * w_tm / 8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm / 8 + j + tiles * 8);
const float* output0_tm2 = out0_tm.row(i * w_tm / 8 + j + tiles * 16);
const float* output0_tm3 = out0_tm.row(i * w_tm / 8 + j + tiles * 24);
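// gather, for the current tile, the eight 1-D inputs r0..r7 of four strips
// at once (one strip per vector lane); each pointer advances by one
// transform plane (out0_tm.w * tiles elements) per gathered coefficient,
// and the second m iteration covers the remaining four strips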
for (int m = 0; m + 3 < 8; m += 4)
{
float32x4_t _output0_tm_00 = {};
float32x4_t _output0_tm_11 = {};
float32x4_t _output0_tm_22 = {};
float32x4_t _output0_tm_33 = {};
float32x4_t _output0_tm_44 = {};
float32x4_t _output0_tm_55 = {};
float32x4_t _output0_tm_66 = {};
float32x4_t _output0_tm_77 = {};
_output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0);
_output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1);
_output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2);
_output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3);
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0 += out0_tm.w * tiles * 25;
output0_tm1 += out0_tm.w * tiles * 25;
output0_tm2 += out0_tm.w * tiles * 25;
output0_tm3 += out0_tm.w * tiles * 25;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m = 0; m + 1 < 6; m += 2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0 + 4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1 + 4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8 * 2;
t1 += 8 * 2;
output0 += outw * 2;
output1 += outw * 2;
}
#else // __aarch64__
const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j);
const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 8);
const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 16);
const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 24);
float* t0 = tmp[0];
float* t1 = tmp[1];
int step = out0_tm.w * tiles * 4;
int step2 = out0_tm.w * tiles * 25 * 4;
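// step advances one transform plane (out0_tm.w * tiles floats, in bytes);
// step2 jumps 25 planes and is applied after the eighth load so that loop1
// starts at the strips for the second half of the tile, mirroring the
// "* 25" pointer bumps in the aarch64 path above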
asm volatile(
// loop0
"vld1.f32 {d16[0]}, [%2], %13 \n"
"vld1.f32 {d16[1]}, [%3], %13 \n"
"vld1.f32 {d17[0]}, [%4], %13 \n"
"vld1.f32 {d17[1]}, [%5], %13 \n"
"vld1.f32 {d20[0]}, [%2], %13 \n"
"vld1.f32 {d20[1]}, [%3], %13 \n"
"vld1.f32 {d21[0]}, [%4], %13 \n"
"vld1.f32 {d21[1]}, [%5], %13 \n"
"vld1.f32 {d24[0]}, [%2], %13 \n"
"vld1.f32 {d24[1]}, [%3], %13 \n"
"vld1.f32 {d25[0]}, [%4], %13 \n"
"vld1.f32 {d25[1]}, [%5], %13 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %13 \n"
"vld1.f32 {d28[1]}, [%3], %13 \n"
"vld1.f32 {d29[0]}, [%4], %13 \n"
"vld1.f32 {d29[1]}, [%5], %13 \n"
"vld1.f32 {d18[0]}, [%2], %13 \n"
"vld1.f32 {d18[1]}, [%3], %13 \n"
"vld1.f32 {d19[0]}, [%4], %13 \n"
"vld1.f32 {d19[1]}, [%5], %13 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %13 \n"
"vld1.f32 {d22[1]}, [%3], %13 \n"
"vld1.f32 {d23[0]}, [%4], %13 \n"
"vld1.f32 {d23[1]}, [%5], %13 \n"
"vld1.f32 {d26[0]}, [%2], %13 \n"
"vld1.f32 {d26[1]}, [%3], %13 \n"
"vld1.f32 {d27[0]}, [%4], %13 \n"
"vld1.f32 {d27[1]}, [%5], %13 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2], %14 \n"
"vld1.f32 {d30[1]}, [%3], %14 \n"
"vld1.f32 {d31[0]}, [%4], %14 \n"
"vld1.f32 {d31[1]}, [%5], %14 \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f12[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f12[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f12[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e12[0] \n"
"vmla.f32 q11, q5, %e12[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f12[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e12[1] \n"
"vmla.f32 q11, q7, %e12[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
"vld1.f32 {d16[0]}, [%2], %13 \n"
"vld1.f32 {d16[1]}, [%3], %13 \n"
"vld1.f32 {d17[0]}, [%4], %13 \n"
"vld1.f32 {d17[1]}, [%5], %13 \n"
"vld1.f32 {d20[0]}, [%2], %13 \n"
"vld1.f32 {d20[1]}, [%3], %13 \n"
"vld1.f32 {d21[0]}, [%4], %13 \n"
"vld1.f32 {d21[1]}, [%5], %13 \n"
"vld1.f32 {d24[0]}, [%2], %13 \n"
"vld1.f32 {d24[1]}, [%3], %13 \n"
"vld1.f32 {d25[0]}, [%4], %13 \n"
"vld1.f32 {d25[1]}, [%5], %13 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %13 \n"
"vld1.f32 {d28[1]}, [%3], %13 \n"
"vld1.f32 {d29[0]}, [%4], %13 \n"
"vld1.f32 {d29[1]}, [%5], %13 \n"
"vld1.f32 {d18[0]}, [%2], %13 \n"
"vld1.f32 {d18[1]}, [%3], %13 \n"
"vld1.f32 {d19[0]}, [%4], %13 \n"
"vld1.f32 {d19[1]}, [%5], %13 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %13 \n"
"vld1.f32 {d22[1]}, [%3], %13 \n"
"vld1.f32 {d23[0]}, [%4], %13 \n"
"vld1.f32 {d23[1]}, [%5], %13 \n"
"vld1.f32 {d26[0]}, [%2], %13 \n"
"vld1.f32 {d26[1]}, [%3], %13 \n"
"vld1.f32 {d27[0]}, [%4], %13 \n"
"vld1.f32 {d27[1]}, [%5], %13 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2] \n"
"vld1.f32 {d30[1]}, [%3] \n"
"vld1.f32 {d31[0]}, [%4] \n"
"vld1.f32 {d31[1]}, [%5] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f12[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f12[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f12[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e12[0] \n"
"vmla.f32 q11, q5, %e12[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f12[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e12[1] \n"
"vmla.f32 q11, q7, %e12[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm1_0), // %3
"=r"(output0_tm2_0), // %4
"=r"(output0_tm3_0) // %5
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm1_0),
"4"(output0_tm2_0),
"5"(output0_tm3_0),
"w"(_coeff), // %12
"r"(step), // %13
"r"(step2) // %14
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw * 2 * 4;
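// stepw advances the two output row pointers by two rows (outw * 2 floats, in bytes)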
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n" // _bias0
"vadd.f32 d20, d20, %P9 \n" // _bias0
"vadd.f32 d17, d17, %P9 \n" // _bias0
"vadd.f32 d21, d21, %P9 \n" // _bias0
"vadd.f32 d18, d18, %P9 \n" // _bias0
"vadd.f32 d22, d22, %P9 \n" // _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j);
const float* output0_tm_1 = out0_tm.row(i * w_tm / 8 + j + tiles);
const float* output0_tm_2 = out0_tm.row(i * w_tm / 8 + j + tiles * 2);
const float* output0_tm_3 = out0_tm.row(i * w_tm / 8 + j + tiles * 3);
const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 4);
const float* output0_tm_5 = out0_tm.row(i * w_tm / 8 + j + tiles * 5);
const float* output0_tm_6 = out0_tm.row(i * w_tm / 8 + j + tiles * 6);
const float* output0_tm_7 = out0_tm.row(i * w_tm / 8 + j + tiles * 7);
for (int m = 0; m < 8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m = 0; m < 6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// crop the padded result back to the requested output size
if (top_blob_bordered.w != top_blob.w || top_blob_bordered.h != top_blob.h)
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
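// the stride-2 inner loop consumes 2*outw pixels of each input row; tailstep
// skips the leftover w - 2*outw pixels plus one whole row, e.g. w = 9,
// outw = 4: tailstep = 1 + 9 = 10 lands r0 at the start of input row i + 2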
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p * inch * 9;
const float* k1 = kernel + (p + 1) * inch * 9;
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0 + 3);
float32x4_t _k06 = vld1q_f32(k0 + 6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1 + 3);
float32x4_t _k16 = vld1q_f32(k1 + 6);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n" // v8 v9 = r0
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n" // v6 = _sum0
"fmul v12.4s, v8.4s, %12.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n" // v7 = _sum1
"fmul v13.4s, v8.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n" // v10
"fmla v6.4s, v9.4s, %12.s[1] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v7.4s, v9.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n" // r1
"fmla v12.4s, v14.4s, %12.s[2] \n"
"fmla v13.4s, v14.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v12.4s, v9.4s, %13.s[1] \n"
"fmla v13.4s, v9.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n" // r2
"fmla v6.4s, v14.4s, %13.s[2] \n"
"fmla v7.4s, v14.4s, %16.s[2] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"fmla v12.4s, v8.4s, %14.s[0] \n"
"fmla v13.4s, v8.4s, %17.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v6.4s, v9.4s, %14.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[1] \n"
"fmla v12.4s, v14.4s, %14.s[2] \n"
"fmla v13.4s, v14.4s, %17.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n" // v8 v9 = r0
"fadd v6.4s, v6.4s, v12.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"bne 0b \n"
"sub %3, %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n" // q8 q9 = r0
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n" // q6 = _sum0
"vmul.f32 q12, q8, %e12[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n" // q7 = _sum1
"vmul.f32 q13, q8, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d20-d21}, [%3] \n" // q10
"vmla.f32 q6, q9, %e12[1] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q7, q9, %e15[1] \n"
"pld [%4, #256] \n"
"vld2.f32 {d16-d19}, [%4]! \n" // r1
"vmla.f32 q12, q11, %f12[0] \n"
"vmla.f32 q13, q11, %f15[0] \n"
"pld [%4, #128] \n"
"vld2.f32 {d20-d21}, [%4] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q12, q9, %e13[1] \n"
"vmla.f32 q13, q9, %e16[1] \n"
"pld [%5, #256] \n"
"vld2.f32 {d16-d19}, [%5]! \n" // r2
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d20-d21}, [%5] \n"
"vmla.f32 q12, q8, %e14[0] \n"
"vmla.f32 q13, q8, %e17[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q6, q9, %e14[1] \n"
"vmla.f32 q7, q9, %e17[1] \n"
"vmla.f32 q12, q11, %f14[0] \n"
"vmla.f32 q13, q11, %f17[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n" // q8 q9 = r0
"vadd.f32 q6, q6, q12 \n"
"vadd.f32 q7, q7, q13 \n"
"subs %0, #1 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"bne 0b \n"
"sub %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
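// the 4-float loads above run one element past the 3-tap window, so lane 3
// of each accumulator is junk; overwriting lane 3 with the current output
// value lets the horizontal add below compute output += 3x3 dot product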
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
outptr1++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9;
k1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
static void conv3x3s2_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8);
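// pack kernels for conv3x3s2_packed_neon: one Mat channel per group of 8
// output channels (plus one channel per leftover channel, hence
// outch / 8 + outch % 8), each channel holding inch rows of 9 taps with the
// 8 channels' values interleaved per tap (hence w = 8 * 9, h = inch)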
const float* kernel = _kernel;
int p = 0;
for (; p + 7 < outch; p += 8)
{
const float* k0 = kernel + (p + 0) * inch * 9;
const float* k1 = kernel + (p + 1) * inch * 9;
const float* k2 = kernel + (p + 2) * inch * 9;
const float* k3 = kernel + (p + 3) * inch * 9;
const float* k4 = kernel + (p + 4) * inch * 9;
const float* k5 = kernel + (p + 5) * inch * 9;
const float* k6 = kernel + (p + 6) * inch * 9;
const float* k7 = kernel + (p + 7) * inch * 9;
float* ktmp = kernel_tm.channel(p / 8);
for (int q = 0; q < inch; q++)
{
for (int k = 0; k < 9; k++)
{
ktmp[0] = k0[k];
ktmp[1] = k1[k];
ktmp[2] = k2[k];
ktmp[3] = k3[k];
ktmp[4] = k4[k];
ktmp[5] = k5[k];
ktmp[6] = k6[k];
ktmp[7] = k7[k];
ktmp += 8;
}
k0 += 9;
k1 += 9;
k2 += 9;
k3 += 9;
k4 += 9;
k5 += 9;
k6 += 9;
k7 += 9;
}
}
for (; p < outch; p++)
{
const float* k0 = kernel + (p + 0) * inch * 9;
float* ktmp = kernel_tm.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
for (int k = 0; k < 9; k++)
{
ktmp[k] = k0[k];
}
ktmp += 9;
k0 += 9;
}
}
}
static void conv3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
// const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p + 0);
Mat out1 = top_blob.channel(p + 1);
Mat out2 = top_blob.channel(p + 2);
Mat out3 = top_blob.channel(p + 3);
Mat out4 = top_blob.channel(p + 4);
Mat out5 = top_blob.channel(p + 5);
Mat out6 = top_blob.channel(p + 6);
Mat out7 = top_blob.channel(p + 7);
const float bias0 = bias ? bias[p + 0] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
const float bias2 = bias ? bias[p + 2] : 0.f;
const float bias3 = bias ? bias[p + 3] : 0.f;
const float bias4 = bias ? bias[p + 4] : 0.f;
const float bias5 = bias ? bias[p + 5] : 0.f;
const float bias6 = bias ? bias[p + 6] : 0.f;
const float bias7 = bias ? bias[p + 7] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
out4.fill(bias4);
out5.fill(bias5);
out6.fill(bias6);
out7.fill(bias7);
const float* ktmp = _kernel.channel(p / 8);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
float* outptr4 = out4;
float* outptr5 = out5;
float* outptr6 = out6;
float* outptr7 = out7;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.4s}, [%1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v9.4s}, [%2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v10.4s}, [%3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v11.4s}, [%4] \n"
///
"prfm pldl1keep, [%9, #256] \n"
"ld2 {v4.4s, v5.4s}, [%9], #32 \n" // v4=00 v5=01
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v12.4s}, [%5] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v13.4s}, [%6] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v14.4s}, [%7] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v15.4s}, [%8] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld2 {v6.4s, v7.4s}, [%9] \n" // v6
"fmla v8.4s, v5.4s, v2.s[0] \n"
"fmla v9.4s, v5.4s, v2.s[1] \n"
"fmla v10.4s, v5.4s, v2.s[2] \n"
"fmla v11.4s, v5.4s, v2.s[3] \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=02
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v5.4s, v3.s[0] \n"
"fmla v13.4s, v5.4s, v3.s[1] \n"
"fmla v14.4s, v5.4s, v3.s[2] \n"
"fmla v15.4s, v5.4s, v3.s[3] \n"
///
"prfm pldl1keep, [%10, #256] \n"
"ld2 {v4.4s, v5.4s}, [%10], #32 \n" // v4=10 v5=11
"fmla v8.4s, v6.4s, v0.s[0] \n"
"fmla v9.4s, v6.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v6.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v6.4s, v1.s[0] \n"
"fmla v13.4s, v6.4s, v1.s[1] \n"
"fmla v14.4s, v6.4s, v1.s[2] \n"
"fmla v15.4s, v6.4s, v1.s[3] \n"
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld2 {v6.4s, v7.4s}, [%10] \n" // v6
"fmla v8.4s, v5.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v5.4s, v0.s[2] \n"
"fmla v11.4s, v5.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=12
"fmla v12.4s, v5.4s, v1.s[0] \n"
"fmla v13.4s, v5.4s, v1.s[1] \n"
"fmla v14.4s, v5.4s, v1.s[2] \n"
"fmla v15.4s, v5.4s, v1.s[3] \n"
///
"prfm pldl1keep, [%11, #256] \n"
"ld2 {v4.4s, v5.4s}, [%11], #32 \n" // v4=20 v5=21
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v6.4s, v3.s[0] \n"
"fmla v13.4s, v6.4s, v3.s[1] \n"
"fmla v14.4s, v6.4s, v3.s[2] \n"
"fmla v15.4s, v6.4s, v3.s[3] \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"prfm pldl1keep, [%11, #256] \n"
"ld2 {v6.4s, v7.4s}, [%11] \n" // v6
"fmla v8.4s, v5.4s, v2.s[0] \n"
"fmla v9.4s, v5.4s, v2.s[1] \n"
"fmla v10.4s, v5.4s, v2.s[2] \n"
"fmla v11.4s, v5.4s, v2.s[3] \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=22
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v5.4s, v3.s[0] \n"
"fmla v13.4s, v5.4s, v3.s[1] \n"
"fmla v14.4s, v5.4s, v3.s[2] \n"
"fmla v15.4s, v5.4s, v3.s[3] \n"
"fmla v8.4s, v6.4s, v0.s[0] \n"
"fmla v9.4s, v6.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v6.4s, v0.s[3] \n"
"fmla v12.4s, v6.4s, v1.s[0] \n"
"fmla v13.4s, v6.4s, v1.s[1] \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"fmla v14.4s, v6.4s, v1.s[2] \n"
"fmla v15.4s, v6.4s, v1.s[3] \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
"sub %12, %12, #288 \n"
"st1 {v12.4s}, [%5], #16 \n"
"st1 {v13.4s}, [%6], #16 \n"
"subs %w0, %w0, #1 \n"
"st1 {v14.4s}, [%7], #16 \n"
"st1 {v15.4s}, [%8], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else // __aarch64__
for (; nn > 0; nn--)
{
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0] \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1] \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2] \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3] \n"
///
"pld [%8, #256] \n"
"vld2.f32 {d8-d11}, [%8]! \n" // q4=00 q5=01
"vld1.f32 {d0-d3}, [%11 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4] \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"pld [%6, #128] \n"
"vld1.f32 {d28-d29}, [%6] \n"
"pld [%7, #128] \n"
"vld1.f32 {d30-d31}, [%7] \n"
"vld1.f32 {d4-d7}, [%11 :128]! \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"pld [%8, #128] \n"
"vld2.f32 {d12-d13}, [%8] \n" // q6
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vext.f32 q6, q4, q6, #1 \n" // q6=02
"vld1.f32 {d0-d3}, [%11 :128]! \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
///
"pld [%9, #256] \n"
"vld2.f32 {d8-d11}, [%9]! \n" // q4=10 q5=11
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vld1.f32 {d4-d7}, [%11 :128]! \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q9, q4, d4[1] \n"
"vmla.f32 q10, q4, d5[0] \n"
"vmla.f32 q11, q4, d5[1] \n"
"vld1.f32 {d0-d3}, [%11 :128]! \n"
"vmla.f32 q12, q4, d6[0] \n"
"vmla.f32 q13, q4, d6[1] \n"
"vmla.f32 q14, q4, d7[0] \n"
"vmla.f32 q15, q4, d7[1] \n"
"pld [%9, #128] \n"
"vld2.f32 {d12-d13}, [%9] \n" // q6
"vmla.f32 q8, q5, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q5, d1[0] \n"
"vmla.f32 q11, q5, d1[1] \n"
"vld1.f32 {d4-d7}, [%11 :128]! \n"
"vext.f32 q6, q4, q6, #1 \n" // q6=12
"vmla.f32 q12, q5, d2[0] \n"
"vmla.f32 q13, q5, d2[1] \n"
"vmla.f32 q14, q5, d3[0] \n"
"vmla.f32 q15, q5, d3[1] \n"
///
"pld [%10, #256] \n"
"vld2.f32 {d8-d11}, [%10]! \n" // q4=20 q5=21
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vld1.f32 {d0-d3}, [%11 :128]! \n"
"vmla.f32 q12, q6, d6[0] \n"
"vmla.f32 q13, q6, d6[1] \n"
"vmla.f32 q14, q6, d7[0] \n"
"vmla.f32 q15, q6, d7[1] \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vld1.f32 {d4-d7}, [%11 :128]! \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"pld [%10, #128] \n"
"vld2.f32 {d12-d13}, [%10] \n" // q6
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vext.f32 q6, q4, q6, #1 \n" // q6=22
"vld1.f32 {d0-d3}, [%11 :128]! \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vst1.f32 {d16-d17}, [%0]! \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"vst1.f32 {d20-d21}, [%2]! \n"
"vst1.f32 {d22-d23}, [%3]! \n"
"sub %11, %11, #288 \n"
"vst1.f32 {d24-d25}, [%4]! \n"
"vst1.f32 {d26-d27}, [%5]! \n"
"vst1.f32 {d28-d29}, [%6]! \n"
"vst1.f32 {d30-d31}, [%7]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v0.4s}, [%8] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"ld1 {v8.s}[0], [%0] \n"
"ld1 {v8.s}[1], [%1] \n"
"ld1 {v8.s}[2], [%2] \n"
"ld1 {v8.s}[3], [%3] \n"
"fmul v14.4s, v10.4s, v0.s[0] \n"
"fmul v15.4s, v11.4s, v0.s[0] \n"
"ld1 {v9.s}[0], [%4] \n"
"ld1 {v9.s}[1], [%5] \n"
"ld1 {v9.s}[2], [%6] \n"
"ld1 {v9.s}[3], [%7] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v0.s[1] \n"
"fmla v9.4s, v13.4s, v0.s[1] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v0.s[2] \n"
"fmla v15.4s, v11.4s, v0.s[2] \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v1.4s}, [%9] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v1.s[0] \n"
"fmla v9.4s, v13.4s, v1.s[0] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v1.s[1] \n"
"fmla v15.4s, v11.4s, v1.s[1] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v1.s[2] \n"
"fmla v9.4s, v13.4s, v1.s[2] \n"
"prfm pldl1keep, [%10, #128] \n"
"ld1 {v0.4s}, [%10] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v0.s[0] \n"
"fmla v15.4s, v11.4s, v0.s[0] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v0.s[1] \n"
"fmla v9.4s, v13.4s, v0.s[1] \n"
"fmla v14.4s, v10.4s, v0.s[2] \n"
"fmla v15.4s, v11.4s, v0.s[2] \n"
"fadd v8.4s, v8.4s, v14.4s \n"
"fadd v9.4s, v9.4s, v15.4s \n"
"sub %11, %11, #288 \n"
"st1 {v8.s}[0], [%0], #4 \n"
"st1 {v8.s}[1], [%1], #4 \n"
"st1 {v8.s}[2], [%2], #4 \n"
"st1 {v8.s}[3], [%3], #4 \n"
"st1 {v9.s}[0], [%4], #4 \n"
"st1 {v9.s}[1], [%5], #4 \n"
"st1 {v9.s}[2], [%6], #4 \n"
"st1 {v9.s}[3], [%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vld1.f32 {d16[0]}, [%0] \n"
"vld1.f32 {d16[1]}, [%1] \n"
"vld1.f32 {d17[0]}, [%2] \n"
"vld1.f32 {d17[1]}, [%3] \n"
"vmul.f32 q14, q10, d0[0] \n"
"vmul.f32 q15, q11, d0[0] \n"
"vld1.f32 {d18[0]}, [%4] \n"
"vld1.f32 {d18[1]}, [%5] \n"
"vld1.f32 {d19[0]}, [%6] \n"
"vld1.f32 {d19[1]}, [%7] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d0[1] \n"
"vmla.f32 q9, q13, d0[1] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[0] \n"
"pld [%9, #128] \n"
"vld1.f32 {d2-d3}, [%9] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d2[0] \n"
"vmla.f32 q9, q13, d2[0] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d3[0] \n"
"vmla.f32 q9, q13, d3[0] \n"
"pld [%10, #128] \n"
"vld1.f32 {d0-d1}, [%10] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q11, d0[0] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d0[1] \n"
"vmla.f32 q9, q13, d0[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[0] \n"
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q9, q9, q15 \n"
"sub %11, %11, #288 \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%1]! \n"
"vst1.f32 {d17[0]}, [%2]! \n"
"vst1.f32 {d17[1]}, [%3]! \n"
"vst1.f32 {d18[0]}, [%4]! \n"
"vst1.f32 {d18[1]}, [%5]! \n"
"vst1.f32 {d19[0]}, [%6]! \n"
"vst1.f32 {d19[1]}, [%7]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else // __ARM_NEON
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
sum0 += r0[0] * ktmp[0];
sum1 += r0[0] * ktmp[1];
sum2 += r0[0] * ktmp[2];
sum3 += r0[0] * ktmp[3];
sum4 += r0[0] * ktmp[4];
sum5 += r0[0] * ktmp[5];
sum6 += r0[0] * ktmp[6];
sum7 += r0[0] * ktmp[7];
ktmp += 8;
sum0 += r0[1] * ktmp[0];
sum1 += r0[1] * ktmp[1];
sum2 += r0[1] * ktmp[2];
sum3 += r0[1] * ktmp[3];
sum4 += r0[1] * ktmp[4];
sum5 += r0[1] * ktmp[5];
sum6 += r0[1] * ktmp[6];
sum7 += r0[1] * ktmp[7];
ktmp += 8;
sum0 += r0[2] * ktmp[0];
sum1 += r0[2] * ktmp[1];
sum2 += r0[2] * ktmp[2];
sum3 += r0[2] * ktmp[3];
sum4 += r0[2] * ktmp[4];
sum5 += r0[2] * ktmp[5];
sum6 += r0[2] * ktmp[6];
sum7 += r0[2] * ktmp[7];
ktmp += 8;
sum0 += r1[0] * ktmp[0];
sum1 += r1[0] * ktmp[1];
sum2 += r1[0] * ktmp[2];
sum3 += r1[0] * ktmp[3];
sum4 += r1[0] * ktmp[4];
sum5 += r1[0] * ktmp[5];
sum6 += r1[0] * ktmp[6];
sum7 += r1[0] * ktmp[7];
ktmp += 8;
sum0 += r1[1] * ktmp[0];
sum1 += r1[1] * ktmp[1];
sum2 += r1[1] * ktmp[2];
sum3 += r1[1] * ktmp[3];
sum4 += r1[1] * ktmp[4];
sum5 += r1[1] * ktmp[5];
sum6 += r1[1] * ktmp[6];
sum7 += r1[1] * ktmp[7];
ktmp += 8;
sum0 += r1[2] * ktmp[0];
sum1 += r1[2] * ktmp[1];
sum2 += r1[2] * ktmp[2];
sum3 += r1[2] * ktmp[3];
sum4 += r1[2] * ktmp[4];
sum5 += r1[2] * ktmp[5];
sum6 += r1[2] * ktmp[6];
sum7 += r1[2] * ktmp[7];
ktmp += 8;
sum0 += r2[0] * ktmp[0];
sum1 += r2[0] * ktmp[1];
sum2 += r2[0] * ktmp[2];
sum3 += r2[0] * ktmp[3];
sum4 += r2[0] * ktmp[4];
sum5 += r2[0] * ktmp[5];
sum6 += r2[0] * ktmp[6];
sum7 += r2[0] * ktmp[7];
ktmp += 8;
sum0 += r2[1] * ktmp[0];
sum1 += r2[1] * ktmp[1];
sum2 += r2[1] * ktmp[2];
sum3 += r2[1] * ktmp[3];
sum4 += r2[1] * ktmp[4];
sum5 += r2[1] * ktmp[5];
sum6 += r2[1] * ktmp[6];
sum7 += r2[1] * ktmp[7];
ktmp += 8;
sum0 += r2[2] * ktmp[0];
sum1 += r2[2] * ktmp[1];
sum2 += r2[2] * ktmp[2];
sum3 += r2[2] * ktmp[3];
sum4 += r2[2] * ktmp[4];
sum5 += r2[2] * ktmp[5];
sum6 += r2[2] * ktmp[6];
sum7 += r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
ktmp -= 8 * 9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 8 * 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* ktmp = _kernel.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* k0 = ktmp;
const float* k1 = ktmp + 3;
const float* k2 = ktmp + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * ktmp[0];
sum += r0[1] * ktmp[1];
sum += r0[2] * ktmp[2];
sum += r1[0] * ktmp[3];
sum += r1[1] * ktmp[4];
sum += r1[2] * ktmp[5];
sum += r2[0] * ktmp[6];
sum += r2[1] * ktmp[7];
sum += r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 9;
}
}
}
|
templatemath.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <system/dll.h>
#include <system/pointercast.h>
#include <math/platformmath.h>
#include <array/DataTypeUtils.h>
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.71828182845904523536
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace sd {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template <typename T>
math_def inline bool nd4j_eq(T val1, T val2, double eps);
template<typename T, typename Z>
math_def inline Z nd4j_re(T val1, T val2);
template<typename T, typename Z>
math_def inline Z nd4j_rint(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_copysign(T val1, T val2);
template <typename T, typename Z>
math_def inline Z nd4j_softplus(T val);
template <typename T>
math_def inline T nd4j_rotl(T val, T shift);
template <typename T>
math_def inline T nd4j_rotr(T val, T shift);
//#ifndef __CUDACC__
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);
template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);
template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);
template<typename X, typename Z>
math_def inline Z nd4j_log(X val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);
template<typename T, typename Z>
math_def inline Z nd4j_round(T val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);
template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);
math_def inline int32_t floatToRawIntBits(float d) {
union {
float f;
int32_t i;
} tmp;
tmp.f = d;
return tmp.i;
}
math_def inline float intBitsToFloat(int32_t i) {
union {
float f;
int32_t i;
} tmp;
tmp.i = i;
return tmp.f;
}
math_def inline float mulsignf(float x, float y) {
return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & (int32_t) (1u << 31)));
}
math_def inline float copysignfk(float x, float y) {
return intBitsToFloat((floatToRawIntBits(x) & ~(int32_t) (1u << 31)) ^ (floatToRawIntBits(y) & (int32_t) (1u << 31)));
}
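// Bit-trick sketch: copysignfk(3.0f, -1.0f) == -3.0f -- the sign bit of x is
// cleared, then XORed with the sign bit of y, with no branch. mulsignf(x, y)
// instead flips x's sign whenever y is negative (including -0.0f).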
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}
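// Illustrative values (assuming Z is a floating type):
// nd4j_sigmoid<float, float>(0.0f) == 0.5f, and the result tends to 1 as
// val -> +inf and to 0 as val -> -inf.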
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
if (val >= (T) 0.f)
return val;
return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
if (val < (T) 0.0f)
return alpha * val;
else
return val;
}
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
if (val >= static_cast<T>(0.0f))
return static_cast<Z>(1.0f);
return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
//return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
return val / ((T) 1.0f + sd::math::nd4j_abs<T>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);
template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);
template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}
template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
return p_tan<Z>(static_cast<Z>(tval));
}
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
Z tanh = nd4j_tanh<T,Z>(val);
return (Z) 1.0f - tanh * tanh;
}
template <typename T, typename Z>
math_def inline Z nd4j_sigmoidderivative(T val) {
Z sigmoid = nd4j_sigmoid<T,Z>(val);
return sigmoid * ((Z) 1.0f - sigmoid);
}
template<typename T, typename Z>
math_def inline Z nd4j_softsignderivative(T val) {
T y = (T) 1.0f + nd4j_abs(val);
return (Z) 1.0f / (y * y);
}
template<typename T, typename Z>
math_def inline Z nd4j_sgn(T val) {
return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);
template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);
//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
*/
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
Z dot = (Z)0.0f;
for(int e = 0; e < length; e++) {
dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
}
return dot;
}
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);
template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
//Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}
template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);
template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
if (value < (float16) 0.f) {
return float16(__hneg(value.data));
} else
return value;
#else
return (float16) fabsf((float) value);
#endif
}
template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
return (bfloat16) fabsf((float) value);
}
template<>
math_def inline float nd4j_abs<float>(float value) {
return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
return llabs(value);
}
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
return value;
}
template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
return value;
}
template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
return value;
}
template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
return value;
}
template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
return value;
}
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
return value == bfloat16::nan(); //0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<float>(float value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
return false;
}
template<typename T>
math_def inline bool nd4j_isfin(T value) {
return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
return copysign(val1, val2);
}
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
if (val2 < 0) return -(nd4j_abs<int>(val1));
else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
else return nd4j_abs<Nd4jLong>(val1);
}
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
return val1 || val2;
}
template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
return val1 > val2 ? val1 : val2;
}
template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
return val1 && val2;
}
template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
return val1 < val2 ? val1 : val2;
}
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
if (sd::math::nd4j_isinf<T>(d1) && sd::math::nd4j_isinf<T>(d2)) {
if (d1 > 0 && d2 > 0)
return true;
else if (d1 < 0 && d2 < 0)
return true;
else
return false;
}
auto diff = static_cast<double>(sd::math::nd4j_abs<T>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return true;
// Knuth approach
// works well except in the range of very small numbers
if (diff <= sd::math::nd4j_max<double>(sd::math::nd4j_abs<double>(static_cast<double>(d1)), sd::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
return true;
return false;
}
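// Worked example with eps = 1e-5: d1 = 1000000.0f, d2 = 1000001.0f gives
// diff = 1.0, which fails the absolute test but passes the Knuth relative
// test, since max(|d1|, |d2|) * eps ~= 10.0 >= diff.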
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
return static_cast<Z>(p_ceil<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
return static_cast<Z>(p_round<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
return p_asin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
return p_atan<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atanh(X val) {
return p_atanh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
return p_cosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
return p_rint<X>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
return p_sinh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
return p_acos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
return p_acosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
return p_cos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
return p_exp<X>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
return static_cast<Z>(p_floor<X>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
return static_cast<Z>(p_log<X>(val));
}
/**
 * This function is a special case: it must return a floating-point value, and the Y argument may optionally be floating point as well
* @tparam X
* @tparam Y
* @tparam Z
* @param val
* @param val2
* @return
*/
template <>
math_def inline float nd4j_pow(float val, float val2) {
return p_pow<float>(val, val2);
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
/**
 * LogGamma(a) - floating-point extension of ln(n!)
**/
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
// if (x <= X(0.0))
// {
// std::stringstream os;
// os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
// throw std::invalid_argument( os.str() );
// }
if (x < X(12.0)) {
return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
}
// Abramowitz and Stegun 6.1.41
// Asymptotic series should be good to at least 11 or 12 figures
// For error analysis, see Whittaker and Watson
// A Course in Modern Analysis (1927), page 252
static const double c[8] = {
1.0/12.0,
-1.0/360.0,
1.0/1260.0,
-1.0/1680.0,
1.0/1188.0,
-691.0/360360.0,
1.0/156.0,
-3617.0/122400.0
};
double z = 1.0 / (double(x) * double(x));
double sum = c[7];
for (int i = 6; i >= 0; i--) {
sum *= z;
sum += c[i];
}
double series = sum / double(x);
static const double halfLogTwoPi = 0.91893853320467274178032973640562;
return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}
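// Sanity check for the asymptotic branch: since lgamma(n) = ln((n-1)!),
// nd4j_lgamma<double, double>(15.0) should be close to ln(14!) ~= 25.1912.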
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
if (val1 == (T) 0.0f && val2 == (T) 0.0f)
return (T) 0.0f;
return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
return p_sin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
return p_sqrt<Z>(static_cast<Z>(val));
}
template <typename X>
math_def inline X neg_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (p - o)/ (p + o);
}
template <typename X>
math_def inline X pos_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(-2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (o - p) / (o + p);
}
math_def inline float neu_tanh(float val, float sign) {
float e(M_E);
float av = sign * val;
auto p = sd::math::nd4j_pow<float, float, float>(e, -av * 2.f);
return (1 - p) / (1 + p);
}
template <>
math_def inline float nd4j_tanh(float val) {
float sign = copysignfk(1.0f, val);
return sign * neu_tanh(val, sign);
}
template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
return val <= 0 ? neg_tanh(val) : pos_tanh(val);
}
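// The sign split above avoids overflow: neg_tanh only evaluates e^(2*val)
// for val <= 0 and pos_tanh only evaluates e^(-2*val) for val >= 0, so the
// exponent is never large and positive.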
template <typename T>
math_def inline T nd4j_rotl(T val, T shift) {
return p_rotl<T>(val, shift);
}
template <typename T>
math_def inline T nd4j_rotr(T val, T shift) {
return p_rotr<T>(val, shift);
}
template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
return p_erf<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
return p_erfc<Z>(static_cast<Z>(val));
}
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
T temp = val1; val1=val2; val2=temp;
};
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
// nd4j_lgamma<X,Z>(a);
// return (Z)std::tgamma(a);
// Split the function domain into three intervals:
// (0, 0.001), [0.001, 12), and (12, infinity)
///////////////////////////////////////////////////////////////////////////
// First interval: (0, 0.001)
//
// For small a, 1/Gamma(a) has power series a + gamma a^2 - ...
// So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
// The relative error over this interval is less than 6e-7.
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant
if (a < X(0.001))
return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));
///////////////////////////////////////////////////////////////////////////
// Second interval: [0.001, 12)
if (a < X(12.0)) {
// The algorithm directly approximates gamma over (1,2) and uses
// reduction identities to reduce other arguments to this interval.
double y = (double)a;
int n = 0;
bool argWasLessThanOne = y < 1.0;
// Add or subtract integers as necessary to bring y into (1,2)
// Will correct for this below
if (argWasLessThanOne) {
y += 1.0;
}
else {
n = static_cast<int>(floor(y)) - 1; // will use n later
y -= n;
}
// numerator coefficients for approximation over the interval (1,2)
static const double p[] = {
-1.71618513886549492533811E+0,
2.47656508055759199108314E+1,
-3.79804256470945635097577E+2,
6.29331155312818442661052E+2,
8.66966202790413211295064E+2,
-3.14512729688483675254357E+4,
-3.61444134186911729807069E+4,
6.64561438202405440627855E+4
};
// denominator coefficients for approximation over the interval (1,2)
static const double q[] = {
-3.08402300119738975254353E+1,
3.15350626979604161529144E+2,
-1.01515636749021914166146E+3,
-3.10777167157231109440444E+3,
2.25381184209801510330112E+4,
4.75584627752788110767815E+3,
-1.34659959864969306392456E+5,
-1.15132259675553483497211E+5
};
double num = 0.0;
double den = 1.0;
double z = y - 1;
for (auto i = 0; i < 8; i++) {
num = (num + p[i]) * z;
den = den * z + q[i];
}
double result = num / den + 1.0;
// Apply correction if argument was not initially in (1,2)
if (argWasLessThanOne) {
// Use identity gamma(z) = gamma(z+1)/z
// The variable "result" now holds gamma of the original y + 1
// Thus we use y-1 to get back the original y.
result /= (y - 1.0);
}
else {
// Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
for (auto i = 0; i < n; i++)
result *= y++;
}
return Z(result);
}
///////////////////////////////////////////////////////////////////////////
// Third interval: [12, infinity)
if (a > 171.624) {
// Correct answer too large to display. Force +infinity.
return Z(DOUBLE_MAX_VALUE);
// return DataTypeUtils::infOrMax<Z>();
}
return sd::math::nd4j_exp<Z,Z>(sd::math::nd4j_lgamma<X,Z>(a));
}
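// Worked example for the second interval: a = 3.5 reduces to y = 1.5 with
// n = 2; the rational approximation gives gamma(1.5) ~= 0.886227, and the
// recurrence multiplies back by 1.5 and 2.5, giving gamma(3.5) ~= 3.32335.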
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a));
auto sum = Z(0.);
auto denom = Z(1.);
if (a <= X(0.000001)) {
//throw std::runtime_error("Cannot calculate gamma for a zero val.");
return Z(0);
}
for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
denom *= (a + i);
sum += nd4j_pow<X, int, Z>(x, i) / denom;
}
return aim * sum;
}
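// This evaluates the regularized lower incomplete gamma P(a, x) via the
// series x^a e^(-x) * sum_i x^i / (a * (a+1) * ... * (a+i)); for example
// P(1, 1) = 1 - 1/e ~= 0.6321. nd4j_igammac below is the complement Q(a, x).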
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}
#ifdef __CUDACC__
namespace atomics {
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);
template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
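// Pattern note: CUDA has no native atomicMin for double, so the fallback
// reinterprets the word as unsigned long long and retries atomicCAS with
// min(val, current) until no concurrent writer intervenes; the float
// specialization above does the same through __float_as_int.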
template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((long long*)address, (long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
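// todo: this specialization (and the int8_t/uint8_t/uint16_t min/max
// variants below) stages the value through a stack temporary, so it is not
// actually atomic; a CAS loop on the containing 32-bit word, as in
// internal_16bit_atomicAdd below, would be needed for correctness.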
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}
template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}
template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int32_t)val);
return *address;
}
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = nd4j_max((float16) old.B.H, val);
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = nd4j_max((float16) old.B.L, val);
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = nd4j_max(old.B.H, val);
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = nd4j_max(old.B.L, val);
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
}
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
unsigned long long* address_as_ull = (unsigned long long int *) address;
// return atomicAdd(address, val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
return atomicAdd(address, val);
}
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
// unsigned long long* address_as_ull = (unsigned long long int *) address;
//
//// return atomicAdd(address, val);
// unsigned long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, val + assumed);
// } while (assumed != old);
// return old;
return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && CUDA_VERSION_MAJOR >=10
// return the previous value, matching the CAS-based fallback below
__half old = atomicAdd(reinterpret_cast<__half*>(address), val.data);
return *reinterpret_cast<float16*>(&old);
#else
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
#endif
}
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
auto addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H + val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L + val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <typename T>
static inline __device__ T internal_16bit_atomicAdd(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
if (reinterpret_cast<int*>(address) == base_address) {
pairOld.B.L = val;
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
pairOld.B.H = val;
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
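// Technique sketch: atomicCAS only operates on words of 32 bits or more, so
// the 16-bit add locates the aligned int containing the target, rebuilds the
// whole word with the updated 16-bit lane (H is the element at the word's
// base address, L its neighbor), and retries the CAS until no other thread
// has modified either lane in between.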
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicAdd<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicAdd<uint16_t>(address, val);
}
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
*address += (val);
return *address;
}
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
return nd4j_atomicAdd<double>(address, -val);
}
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
return nd4j_atomicMul<double>(address, 1./val);
}
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
// return atomicAdd(address, val);
//}
template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
return (int32_t)atomicAdd((int*)address, (int)val);
}
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
return nd4j_atomicAdd<float>(address, -val);
}
template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
return nd4j_atomicAdd<float16>(address, -val);
}
template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
return nd4j_atomicAdd<bfloat16>(address, -val);
}
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
int* address_as_ull =
( int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val *
__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (int8_t)old;
}
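// Technique sketch (also used by the unsigned char variant below): __byte_perm
// extracts the addressed byte from the aligned 32-bit word (selector
// (addr & 3) | 0x4440 zero-extends that lane), the product's low byte is
// spliced back in with the precomputed selector, and atomicCAS publishes the
// word, retrying if any byte changed concurrently.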
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (uint8_t)old;
}
template <typename T>
static inline __device__ T internal_16bit_atomicMul(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
if (reinterpret_cast<int*>(address) == base_address) {
pairOld.B.L = val;
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
pairOld.B.H = val;
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicMul<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicMul<uint16_t>(address, val);
}
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
int* res_address = address;
int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
unsigned int* res_address = address;
unsigned int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (int64_t)old;
}
template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (uint64_t)old;
}
#if !defined(_WIN32) && !defined(_WIN64)
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* res_address = (unsigned long long*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (Nd4jLong)old;
}
#endif
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, val);
}
template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, val);
}
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
return nd4j_atomicMul<float>(address, 1.f / val);
}
template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val);
}
template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val);
}
}
#endif
}
}
#ifdef _OPENMP
#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_abs(omp_in) + sd::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
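// Minimal usage sketch (illustrative only; buffer and n are assumed to be in
// scope): the reductions declared above let hot loops reduce over libnd4j
// types, e.g.
//
//   float m = -MAX_FLOAT;
//   #pragma omp parallel for reduction(maxTF : m)
//   for (int i = 0; i < n; i++)
//       m = sd::math::nd4j_max(m, buffer[i]);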
#endif
#endif /* TEMPLATEMATH_H_ */
|
GB_binop__isge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_fp64
// A.*B function (eWiseMult): GB_AemultB__isge_fp64
// A*D function (colscale): GB_AxD__isge_fp64
// D*A function (rowscale): GB_DxB__isge_fp64
// C+=B function (dense accum): GB_Cdense_accumB__isge_fp64
// C+=b function (dense accum): GB_Cdense_accumb__isge_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_fp64
// C=scalar+B GB_bind1st__isge_fp64
// C=scalar+B' GB_bind1st_tran__isge_fp64
// C=A+scalar GB_bind2nd__isge_fp64
// C=A'+scalar GB_bind2nd_tran__isge_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isge_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isge_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isge_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isge_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__isge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isge_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
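// Call sketch (hypothetical dense buffers, no bitmap, 1 thread): compute
// Cx [p] = (x >= Bx [p]) with x = 0.5:
//
//      double B [4] = {0.1, 0.5, 0.9, 0.2} ;
//      double C [4] ;
//      double x = 0.5 ;
//      GB_bind1st__isge_fp64 ((GB_void *) C, (GB_void *) &x, (GB_void *) B,
//          NULL, 4, 1) ;
//      // C is now {1., 1., 0., 1.}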
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isge_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__isge_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__isge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_int64
// op(A') function: GB_tran__abs_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int64_int64
(
int64_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
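// Call sketch (hypothetical buffers): Cx and Ax may be aliased, so the
// operator can be applied in place:
//
//      int64_t A [3] = {-5, 0, 7} ;
//      GB_unop__abs_int64_int64 (A, A, 3, 1) ;
//      // A is now {5, 0, 7}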
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | /*BHEADER****************************************************************
* (c) 2007 The Regents of the University of California *
* *
* See the file COPYRIGHT_and_DISCLAIMER for a complete copyright *
* notice and disclaimer. *
* *
*EHEADER****************************************************************/
//--------------
// A micro kernel
//--------------
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#include "headers.h"
//
const int testIter = /*50000;*/1000;
double totalWallTime = 0.0;
//
void test_Matvec();
void test_Relax();
void test_Axpy();
//
int main(int argc, char *argv[])
{
double t0 = 0.0,
t1 = 0.0,
del_wtime = 0.0;
int max_num_threads;
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// CORAL AMGmk Benchmark Version 1.0 \n");
printf("// \n");
printf("//------------ \n");
omp_set_num_threads(1);
#pragma omp parallel
#pragma omp master
max_num_threads = omp_get_num_threads();
printf("\nmax_num_threads = %d \n\n",max_num_threads );
printf("\n testIter = %d \n\n", testIter );
t0 = omp_get_wtime();
// Matvec
totalWallTime = 0.0;
test_Matvec();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// MATVEC\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
// Relax
totalWallTime = 0.0;
test_Relax();
//__WHATIF__BEGIN__
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Relax\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
// Axpy
totalWallTime = 0.0;
test_Axpy();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Axpy\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
t1 = omp_get_wtime();
del_wtime = t1 - t0;
printf("\nTotal Wall time = %f seconds. \n", del_wtime);
//__WHATIF__END__
return 0;
}
void test_Matvec()
{
double t0 = 0.0,
t1 = 0.0;
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double *y_data, *sol_data;
double error, diff;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
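// 7-point Laplacian stencil: 6 on the diagonal, -1 for each axis neighbor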
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
hypre_SeqVectorSetConstantValues(y,0);
t0 = omp_get_wtime();
for (i=0; i<testIter; ++i)
hypre_CSRMatrixMatvec(1,A,x,0,y);
t1 = omp_get_wtime() ;
totalWallTime += t1 - t0;
y_data = hypre_VectorData(y);
sol_data = hypre_VectorData(sol);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(y_data[i]-sol_data[i]);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Matvec: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
void test_Relax()
{
double t0 = 0.0,
t1 = 0.0;
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double *x_data;
double diff, error;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
t0 = omp_get_wtime();
for (i=0; i<testIter; ++i)
hypre_BoomerAMGSeqRelax(A, sol, x);
t1 = omp_get_wtime();
totalWallTime += t1 - t0;
x_data = hypre_VectorData(x);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(x_data[i]-1);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Relax: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
void test_Axpy()
{
double t0 = 0.0,
t1 = 0.0;
hypre_Vector *x, *y;
int nx, i;
double alpha=0.5;
double diff, error;
double *y_data;
nx = 125000; /* size per proc */
x = hypre_SeqVectorCreate(nx);
y = hypre_SeqVectorCreate(nx);
hypre_SeqVectorInitialize(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorSetConstantValues(x,1);
hypre_SeqVectorSetConstantValues(y,1);
t0 = omp_get_wtime();
for (i=0; i<testIter; ++i)
hypre_SeqVectorAxpy(alpha,x,y);
t1 = omp_get_wtime();
y_data = hypre_VectorData(y);
error = 0;
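// after testIter axpy calls each entry should be y = 1 + alpha*testIter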
for (i=0; i < nx; i++)
{
diff = fabs(y_data[i]-1-0.5*(double)testIter);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Axpy: error: %e\n", error);
totalWallTime += t1 - t0;
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
}
|
GB_binop__ne_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_int64
// A.*B function (eWiseMult): GB_AemultB__ne_int64
// A*D function (colscale): GB_AxD__ne_int64
// D*A function (rowscale): GB_DxB__ne_int64
// C+=B function (dense accum): GB_Cdense_accumB__ne_int64
// C+=b function (dense accum): GB_Cdense_accumb__ne_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_int64
// C=scalar+B GB_bind1st__ne_int64
// C=scalar+B' GB_bind1st_tran__ne_int64
// C=A+scalar GB_bind2nd__ne_int64
// C=A'+scalar GB_bind2nd_tran__ne_int64
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT64 || GxB_NO_NE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ne_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ne_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ne_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ne_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ne_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ne_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ne_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ne_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ne_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
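// Call sketch (hypothetical dense buffers, no bitmap, 1 thread): flag the
// entries that differ from y = 3:
//
//      int64_t A [4] = {3, 1, 3, 9} ;
//      bool C [4] ;
//      int64_t y = 3 ;
//      GB_bind2nd__ne_int64 ((GB_void *) C, (GB_void *) A, (GB_void *) &y,
//          NULL, 4, 1) ;
//      // C is now {0, 1, 0, 1}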
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB_bind1st_tran__ne_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB_bind2nd_tran__ne_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint64
// op(A') function: GB_tran__minv_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp32_uint64
(
float *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
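// Call sketch (hypothetical buffers): typecast uint64_t to float, then take
// the reciprocal:
//
//      uint64_t A [3] = {1, 2, 4} ;
//      float C [3] ;
//      GB_unop__minv_fp32_uint64 (C, A, 3, 1) ;
//      // C is now {1.0f, 0.5f, 0.25f}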
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_fp32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
KDTree.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_KDTREE_H_
#define _SPTAG_COMMON_KDTREE_H_
#include <vector>
#include <string>
#include <shared_mutex>
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
namespace SPTAG
{
namespace COMMON
{
// node type for storing the KD-tree
struct KDTNode
{
SizeType left;
SizeType right;
DimensionType split_dim;
float split_value;
};
class KDTree
{
public:
KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {}
KDTree(const KDTree& other) : m_iTreeNumber(other.m_iTreeNumber),
m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit),
m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {}
~KDTree() {}
inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }
inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }
inline SizeType sizePerTree() const {
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
}
template <typename T>
void Rebuild(const Dataset<T>& data, IAbortOperation* abort)
{
COMMON::KDTree newTrees(*this);
newTrees.BuildTrees<T>(data, 1, nullptr, abort);
std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
m_pTreeRoots.swap(newTrees.m_pTreeRoots);
m_pTreeStart.swap(newTrees.m_pTreeStart);
}
template <typename T>
void BuildTrees(const Dataset<T>& data, int numOfThreads, std::vector<SizeType>* indices = nullptr, IAbortOperation* abort = nullptr)
{
if (COMMON::DistanceUtils::Quantizer)
{
switch (COMMON::DistanceUtils::Quantizer->GetReconstructType())
{
#define DefineVectorValueType(Name, Type) \
case VectorValueType::Name: \
BuildTreesCore<T, Type>(data, numOfThreads, indices, abort); \
break;
#include "inc/Core/DefinitionList.h"
#undef DefineVectorValueType
}
}
else
{
BuildTreesCore<T, T>(data, numOfThreads, indices, abort);
}
}
template <typename T, typename R>
void BuildTreesCore(const Dataset<T>& data, int numOfThreads, std::vector<SizeType>* indices = nullptr, IAbortOperation* abort = nullptr)
{
std::vector<SizeType> localindices;
if (indices == nullptr) {
localindices.resize(data.R());
for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i;
}
else {
localindices.assign(indices->begin(), indices->end());
}
m_pTreeRoots.resize(m_iTreeNumber * localindices.size());
m_pTreeStart.resize(m_iTreeNumber, 0);
#pragma omp parallel for num_threads(numOfThreads)
for (int i = 0; i < m_iTreeNumber; i++)
{
if (abort && abort->ShouldAbort()) continue;
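// stagger the threads and reseed so each tree shuffles with a different sequence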
Sleep(i * 100); std::srand(clock());
std::vector<SizeType> pindices(localindices.begin(), localindices.end());
std::random_shuffle(pindices.begin(), pindices.end());
m_pTreeStart[i] = i * (SizeType)pindices.size();
LOG(Helper::LogLevel::LL_Info, "Start to build KDTree %d\n", i + 1);
SizeType iTreeSize = m_pTreeStart[i];
DivideTree<T, R>(data, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize, abort);
LOG(Helper::LogLevel::LL_Info, "%d KDTree built, %d %zu\n", i + 1, iTreeSize - m_pTreeStart[i], pindices.size());
}
}
inline std::uint64_t BufferSize() const
{
return sizeof(int) + sizeof(SizeType) * m_iTreeNumber +
sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size();
}
ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const
{
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
IOBINARY(p_out, WriteBinary, sizeof(KDTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
LOG(Helper::LogLevel::LL_Info, "Save KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode SaveTrees(std::string sTreeFileName) const
{
LOG(Helper::LogLevel::LL_Info, "Save KDT to %s\n", sTreeFileName.c_str());
auto ptr = f_createIO();
if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
return SaveTrees(ptr);
}
ErrorCode LoadTrees(char* pKDTMemFile)
{
m_iTreeNumber = *((int*)pKDTMemFile);
pKDTMemFile += sizeof(int);
m_pTreeStart.resize(m_iTreeNumber);
memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(SizeType) * m_iTreeNumber);
pKDTMemFile += sizeof(SizeType)*m_iTreeNumber;
SizeType treeNodeSize = *((SizeType*)pKDTMemFile);
pKDTMemFile += sizeof(SizeType);
m_pTreeRoots.resize(treeNodeSize);
memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize);
LOG(Helper::LogLevel::LL_Info, "Load KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input)
{
if (m_bOldVersion) {
struct KdtreeNode
{
int left;
int right;
short split_dim;
float split_value;
} tmpNode;
IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
int treeNodeSize = 0;
for (int i = 0; i < m_iTreeNumber; i++)
{
m_pTreeStart.push_back(treeNodeSize);
int iNodeSize;
IOBINARY(p_input, ReadBinary, sizeof(iNodeSize), (char*)&iNodeSize);
m_pTreeRoots.resize(treeNodeSize + iNodeSize);
for (int j = treeNodeSize; j < treeNodeSize + iNodeSize; j++) {
IOBINARY(p_input, ReadBinary, sizeof(KdtreeNode), (char*)(&tmpNode));
m_pTreeRoots[j].left = tmpNode.left + treeNodeSize;
m_pTreeRoots[j].right = tmpNode.right + treeNodeSize;
m_pTreeRoots[j].split_dim = tmpNode.split_dim;
m_pTreeRoots[j].split_value = tmpNode.split_value;
}
treeNodeSize += iNodeSize;
}
LOG(Helper::LogLevel::LL_Info, "Load KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
m_pTreeStart.resize(m_iTreeNumber);
IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize;
IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
m_pTreeRoots.resize(treeNodeSize);
IOBINARY(p_input, ReadBinary, sizeof(KDTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
LOG(Helper::LogLevel::LL_Info, "Load KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode LoadTrees(std::string sTreeFileName)
{
LOG(Helper::LogLevel::LL_Info, "Load KDT From %s\n", sTreeFileName.c_str());
auto ptr = f_createIO();
if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile;
return LoadTrees(ptr);
}
template <typename T>
void InitSearchTrees(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
{
for (int i = 0; i < m_iTreeNumber; i++) {
KDTSearch(p_data, fComputeDistance, p_query, p_space, m_pTreeStart[i], 0);
}
}
template <typename T>
void SearchTrees(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
{
while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits)
{
auto& tcell = p_space.m_SPTQueue.pop();
KDTSearch(p_data, fComputeDistance, p_query, p_space, tcell.node, tcell.distance);
}
}
private:
template <typename T>
void KDTSearch(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T>& p_query,
COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const
{
if (COMMON::DistanceUtils::Quantizer)
{
switch (COMMON::DistanceUtils::Quantizer->GetReconstructType())
{
#define DefineVectorValueType(Name, Type) \
case VectorValueType::Name: \
return KDTSearchCore<T, Type>(p_data, fComputeDistance, p_query, p_space, node, distBound);
#include "inc/Core/DefinitionList.h"
#undef DefineVectorValueType
}
}
else
{
return KDTSearchCore<T, T>(p_data, fComputeDistance, p_query, p_space, node, distBound);
}
}
template <typename T, typename Q>
void KDTSearchCore(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T> &p_query,
COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const {
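// negative node ids encode leaves: the leaf for vector i is stored as -(i + 1)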
if (node < 0)
{
SizeType index = -node - 1;
if (index >= p_data.R()) return;
#ifdef PREFETCH
const T* data = p_data[index];
_mm_prefetch((const char*)data, _MM_HINT_T0);
_mm_prefetch((const char*)(data + 64), _MM_HINT_T0);
#endif
if (p_space.CheckAndSet(index)) return;
++p_space.m_iNumberOfTreeCheckedLeaves;
++p_space.m_iNumberOfCheckedLeaves;
p_space.m_NGQueue.insert(NodeDistPair(index, fComputeDistance(p_query.GetQuantizedTarget(), data, p_data.C())));
return;
}
auto& tnode = m_pTreeRoots[node];
float diff = ((Q*) p_query.GetTarget())[tnode.split_dim] - tnode.split_value;
float distanceBound = distBound + diff * diff;
SizeType otherChild, bestChild;
if (diff < 0)
{
bestChild = tnode.left;
otherChild = tnode.right;
}
else
{
otherChild = tnode.left;
bestChild = tnode.right;
}
p_space.m_SPTQueue.insert(NodeDistPair(otherChild, distanceBound));
KDTSearchCore<T,Q>(p_data, fComputeDistance, p_query, p_space, bestChild, distBound);
}
template <typename T, typename R>
void DivideTree(const Dataset<T>& data, std::vector<SizeType>& indices, SizeType first, SizeType last,
SizeType index, SizeType &iTreeSize, IAbortOperation* abort = nullptr) {
if (abort && abort->ShouldAbort()) return;
ChooseDivision<T, R>(data, m_pTreeRoots[index], indices, first, last);
SizeType i = Subdivide<T, R>(data, m_pTreeRoots[index], indices, first, last);
if (i - 1 <= first)
{
m_pTreeRoots[index].left = -indices[first] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].left = iTreeSize;
DivideTree<T, R>(data, indices, first, i - 1, iTreeSize, iTreeSize);
}
if (last == i)
{
m_pTreeRoots[index].right = -indices[last] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].right = iTreeSize;
DivideTree<T, R>(data, indices, i, last, iTreeSize, iTreeSize);
}
}
template <typename T, typename R>
void ChooseDivision(const Dataset<T>& data, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last)
{
SizeType cols = data.C();
bool quantizer_exists = (nullptr != COMMON::DistanceUtils::Quantizer);
R* v_holder = nullptr;
if (quantizer_exists)
{
cols = COMMON::DistanceUtils::Quantizer->ReconstructDim();
v_holder = (R*)_mm_malloc(COMMON::DistanceUtils::Quantizer->ReconstructSize(), ALIGN_SPTAG);
}
std::vector<float> meanValues(cols, 0);
std::vector<float> varianceValues(cols, 0);
SizeType end = min(first + m_iSamples, last);
SizeType count = end - first + 1;
// calculate the mean of each dimension
for (SizeType j = first; j <= end; j++)
{
R* v;
if (quantizer_exists)
{
COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)data[indices[j]], v_holder);
v = v_holder;
}
else
{
v = (R*)data[indices[j]];
}
for (DimensionType k = 0; k < cols; k++)
{
meanValues[k] += v[k];
}
}
for (DimensionType k = 0; k < cols; k++)
{
meanValues[k] /= count;
}
// calculate the variance of each dimension
for (SizeType j = first; j <= end; j++)
{
R* v;
if (quantizer_exists)
{
COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)data[indices[j]], v_holder);
v = v_holder;
}
else
{
v = (R*)data[indices[j]];
}
for (DimensionType k = 0; k < cols; k++)
{
float dist = v[k] - meanValues[k];
varianceValues[k] += dist*dist;
}
}
if (quantizer_exists)
{
_mm_free(v_holder);
}
// choose the split dimension from among the TOP_DIM dimensions with the largest variance
node.split_dim = SelectDivisionDimension(varianceValues);
// determine the threshold
node.split_value = meanValues[node.split_dim];
}
DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const
{
// Record the top maximum variances
std::vector<DimensionType> topind(m_numTopDimensionKDTSplit);
int num = 0;
// order the variances
for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++)
{
if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]])
{
if (num < m_numTopDimensionKDTSplit)
{
topind[num++] = i;
}
else
{
topind[num - 1] = i;
}
int j = num - 1;
// order the TOP_DIM variances
while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]])
{
std::swap(topind[j], topind[j - 1]);
j--;
}
}
}
// randomly choose a dimension from TOP_DIM
return topind[COMMON::Utils::rand(num)];
}
template <typename T, typename R>
SizeType Subdivide(const Dataset<T>& data, const KDTNode& node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const
{
SizeType i = first;
SizeType j = last;
bool quantizer_exists = (bool) COMMON::DistanceUtils::Quantizer;
R* v_holder = nullptr;
if (quantizer_exists)
{
v_holder = (R*)_mm_malloc(COMMON::DistanceUtils::Quantizer->ReconstructSize(), ALIGN_SPTAG);
}
// decide which child each point belongs to
while (i <= j)
{
R* v;
SizeType ind = indices[i];
if (quantizer_exists)
{
COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)data[ind], v_holder);
v = v_holder;
}
else
{
v = (R*)data[ind];
}
float val = v[node.split_dim];
if (val < node.split_value)
{
i++;
}
else
{
std::swap(indices[i], indices[j]);
j--;
}
}
if (quantizer_exists)
{
_mm_free(v_holder);
}
// if all the points in the node are equal, split the node evenly in two
if ((i == first) || (i == last + 1))
{
i = (first + last + 1) / 2;
}
return i;
}
private:
std::vector<SizeType> m_pTreeStart;
std::vector<KDTNode> m_pTreeRoots;
public:
std::unique_ptr<std::shared_timed_mutex> m_lock;
int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples;
bool m_bOldVersion;
};
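// Minimal usage sketch (assuming a populated Dataset<float> named data):
//
//     SPTAG::COMMON::KDTree trees;
//     trees.BuildTrees<float>(data, /*numOfThreads*/ 4);
//     trees.SaveTrees(std::string("kdtree.bin"));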
}
}
#endif
|
ofmo-counter.c | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "ofmo-def.h"
#include "ofmo-prof.h"
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif
/* -----------------------------------------------------------------
* Functions for the intra-worker global counter in hybrid
* (MPI + OpenMP) parallel runs
* ----------------------------------------------------------------- */
/* global counter
* The master thread of the master process (rank = 0) is a dedicated
* thread that serves the global counter and does not perform any jobs
* itself.
* This function must be called from a thread-parallel region.
* MPI_THREAD_SERIALIZED support is required.
* */
#define N_INC_STATE 4
#define NCNTR 3
static int nused_counter = 0;
static struct {
MPI_Comm comm;
int myrank;
int nprocs;
int init_val;
int current_val;
int last_val;
int node_val;
int finish_flag;
int maxthreads;
int inc_level;
int incs[N_INC_STATE];
int limit[N_INC_STATE];
} gc[NCNTR];
int ofmo_gc_init(
const int id,
MPI_Comm comm, const int init_val,
const int njobs ) {
int provided;
MPI_Query_thread( &provided );
if ( provided < MPI_THREAD_SERIALIZED ) {
if ( fp_prof )
fdbg( fp_prof,
"ERROR: MPI_THREAD_SERIALIZED is not supported\n");
return -1;
}
if ( id<0 || id>=NCNTR ) return -1;
MPI_Comm_rank( comm, &(gc[id].myrank) );
MPI_Comm_size( comm, &(gc[id].nprocs) );
gc[id].comm = comm;
gc[id].init_val = init_val;
gc[id].current_val = init_val; // current value
gc[id].last_val = init_val + njobs; // last value of the whole job range
gc[id].node_val = init_val; // last value of the locally reserved chunk
gc[id].finish_flag = false;
gc[id].maxthreads = omp_get_max_threads();
if ( id == 0 ) { // for IFC3C
gc[id].inc_level = 0;
gc[id].limit[0] = gc[id].last_val - (njobs>>1);
gc[id].limit[1] = gc[id].last_val - (njobs>>2);
gc[id].limit[2] = gc[id].last_val - (njobs>>3);
gc[id].limit[3] = INT_MAX;
gc[id].incs[0] = gc[id].maxthreads*2;
gc[id].incs[1] = gc[id].maxthreads;
gc[id].incs[2] = gc[id].maxthreads>>1;
gc[id].incs[3] = gc[id].maxthreads>>2;
if ( gc[id].incs[2] < 2 ) gc[id].incs[2] = 2;
} else if ( id == 1 ) { // for IFC2C
gc[id].inc_level = 0;
gc[id].limit[0] = gc[id].last_val - (njobs>>1);
gc[id].limit[1] = gc[id].last_val - (njobs>>2);
gc[id].limit[2] = gc[id].last_val - (njobs>>3);
gc[id].limit[3] = INT_MAX;
gc[id].incs[0] = gc[id].maxthreads*3;
gc[id].incs[1] = gc[id].maxthreads*2;
gc[id].incs[2] = gc[id].maxthreads;
gc[id].incs[3] = gc[id].maxthreads>>1;
} else if ( id == 2 ) { // for IFC4C
gc[id].limit[0] = gc[id].last_val - (njobs>>1);
gc[id].limit[1] = gc[id].last_val - (njobs>>2);
gc[id].limit[2] = gc[id].last_val - (njobs>>3);
gc[id].limit[3] = INT_MAX;
gc[id].incs[0] = gc[id].maxthreads*4;
gc[id].incs[1] = gc[id].maxthreads*2;
gc[id].incs[2] = gc[id].maxthreads;
gc[id].incs[3] = gc[id].maxthreads>>1;
}
if ( gc[id].incs[3] < 1 ) gc[id].incs[3] = 1;
// pick the initial increment level based on the initial value
for ( int i=0; i<N_INC_STATE; i++ ) {
if ( (init_val+gc[id].incs[i]) < gc[id].limit[i] ) {
gc[id].inc_level = i;
break;
}
}
nused_counter++;
return 0;
}
/*
* Fetch the next value of the counter.
* Must be called inside a thread-parallel region.
* */
int ofmo_gc_nxtval( const int id ) {
int myrank, tag=15, val, mythread;
int inc, master;
int buf[2];
MPI_Status status;
MPI_Comm comm;
if ( id<0 || id>=NCNTR ) return -1;
myrank = gc[id].myrank;
comm = gc[id].comm;
master = 0;
if ( gc[id].nprocs == 1 ) {
#pragma omp critical
{
val = gc[id].current_val;
gc[id].current_val++;
}
} else {
if ( myrank == master ) {
mythread = omp_get_thread_num();
if ( mythread == 0 ) { // master thread of the master process: serve the counter
int nfinished[NCNTR], next, ID;
if ( nused_counter == 0 ) return INT_MAX;
for ( int i=0; i<NCNTR; i++ ) nfinished[i] = 0;
while (1) {
MPI_Recv(&ID, 1, MPI_INT, MPI_ANY_SOURCE, tag,
comm, &status);
inc = gc[ID].incs[ gc[ID].inc_level ];
#pragma omp critical
{
val = gc[ID].current_val;
gc[ID].current_val += inc;
}
buf[0] = val; buf[1] = inc;
MPI_Send( buf, 2, MPI_INT, status.MPI_SOURCE, tag,
comm );
next = val + inc;
if ( next >= gc[ID].last_val ) nfinished[ID]++;
if ( nfinished[ID] >= ( gc[ID].nprocs - 1 ) ) {
nused_counter--;
if ( nused_counter == 0 ) break;
}
if ( next >= gc[ID].limit[gc[ID].inc_level] )
gc[ID].inc_level++;
} // while (1)
val = INT_MAX;
} else {
#pragma omp critical
{
val = gc[id].current_val;
gc[id].current_val++;
}
} // if ( mythread == 0 )
} else { // processes other than the master
#pragma omp critical
{
// if no jobs remain in the local chunk
if ( gc[id].current_val == gc[id].node_val
&& !gc[id].finish_flag ) {
buf[0] = id;
MPI_Send( buf, 1, MPI_INT, 0, tag, comm );
MPI_Recv( buf, 2, MPI_INT, 0, tag, comm, &status );
gc[id].current_val = buf[0];
inc = buf[1];
gc[id].node_val = gc[id].current_val + inc;
if ( gc[id].node_val >= gc[id].last_val
&& !gc[id].finish_flag )
gc[id].finish_flag = true;
}
val = gc[id].current_val;
gc[id].current_val++;
}
} // if ( myrank == master )
} // if ( gc[id].nprocs == 1 )
return val;
}
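/* Minimal usage sketch (do_job is a hypothetical task function; njobs as
 * passed to ofmo_gc_init): every thread repeatedly pulls the next global job
 * id until the range is exhausted. On the master process, thread 0 acts as
 * the counter server instead of taking jobs.
 *
 *   ofmo_gc_init( 1, MPI_COMM_WORLD, 0, njobs );
 *   #pragma omp parallel
 *   {
 *       int job;
 *       while ( (job = ofmo_gc_nxtval( 1 )) < njobs ) do_job( job );
 *   }
 * */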
|
ConvolutionUnfold.h | #pragma once
#include <string.h>
#include <math.h>
#include <algorithm>
#include "General.h"
#include "TensorRef.h"
#include "Vector-inl.h"
OPS_API int TS_Unfolded_Copy(
TensorRef* finput,
TensorRef* input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight);
OPS_API int TS_Unfolded_Acc(
TensorRef *finput,
TensorRef *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight);
// note: overlapping windows accumulate into the same input elements, so this one
// cannot be parallelized as finely as unfolded_copy (only across input planes)
template<typename T>
void unfolded_acc(
TensorRef *finput,
TensorRef *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight)
{
__int64 nip; // signed loop index: MSVC OpenMP rejects unsigned loop variables
T *input_data = (T*)input->buffer;
T *finput_data = (T*)finput->buffer;
#pragma omp parallel for private(nip)
for (nip = 0; nip < nInputPlane; nip++)
{
size_t kw, kh, y, x;
__int64 ix = 0, iy = 0;
for (kh = 0; kh < kH; kh++)
{
for (kw = 0; kw < kW; kw++)
{
T *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
T *dst = input_data + nip*(inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
size_t lpad, rpad;
for (y = 0; y < outputHeight; y++) {
iy = (__int64)(y*dH - padH + kh);
if (iy < 0 || iy >= inputHeight) {
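// this output row maps entirely to the vertical padding: nothing to accumulate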
}
else {
if (dW == 1) {
ix = (__int64)(0 - padW + kw);
// note: compute in signed arithmetic; (padW - kw) underflows size_t when kw > padW
lpad = (size_t)std::max((__int64)0, (__int64)padW - (__int64)kw);
rpad = (size_t)std::max((__int64)0, (__int64)padW - (__int64)(kW - kw - 1));
Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + lpad), src + (size_t)(y*outputWidth + lpad), 1, outputWidth - lpad - rpad);
}
else {
for (x = 0; x<outputWidth; x++) {
ix = (__int64)(x*dW - padW + kw);
if (ix < 0 || ix >= inputWidth) {
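// this output column maps to the horizontal padding: skip it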
}
else
Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth + x), 1, 1);
}
}
}
}
}
else {
for (y = 0; y < outputHeight; y++) {
iy = (__int64)(y*dH + kh);
ix = (__int64)(0 + kw);
if (dW == 1)
Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth), 1, outputWidth);
else {
for (x = 0; x < outputWidth; x++)
Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + x*dW), src + (size_t)(y*outputWidth + x), 1, 1);
}
}
}
}
}
}
}
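// Layout sketch (hypothetical sizes): with nInputPlane = 1, kW = kH = 3,
// dW = dH = 1 and no padding, a 5x5 input yields a 3x3 output, so finput
// is laid out as [nInputPlane*kH*kW, outputHeight*outputWidth] = [9, 9];
// unfolded_copy fills it from the input (im2col) and unfolded_acc scatters
// it back, summing contributions from overlapping windows (col2im).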
template<typename T>
void unfolded_copy(TensorRef *finput, TensorRef *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight)
{
long k;
T *input_data = (T*)input->buffer;
T *finput_data = (T*)finput->buffer;
#pragma omp parallel for private(k)
for (k = 0; k < nInputPlane*kH*kW; k++) {
size_t nip = k / (kH*kW);
size_t rest = k % (kH*kW);
size_t kh = rest / kW;
size_t kw = rest % kW;
size_t x, y;
__int64 ix, iy;
T *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
T *src = input_data + nip*(inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
size_t lpad, rpad;
for (y = 0; y < outputHeight; y++) {
iy = (__int64)(y*dH - padH + kh);
if (iy < 0 || iy >= inputHeight) {
memset(dst + y*outputWidth, 0, sizeof(T)*outputWidth);
}
else {
if (dW == 1) {
ix = (__int64)(0 - padW + kw);
// note: compute in signed arithmetic; (padW - kw) underflows size_t when kw > padW
lpad = (size_t)std::max((__int64)0, (__int64)padW - (__int64)kw);
rpad = (size_t)std::max((__int64)0, (__int64)padW - (__int64)(kW - kw - 1));
if ((__int64)outputWidth - (__int64)rpad - (__int64)lpad <= 0) {
memset(dst + (size_t)(y*outputWidth), 0, sizeof(T)*outputWidth);
}
else {
if (lpad > 0) memset(dst + y*outputWidth, 0, sizeof(T)*lpad);
memcpy(dst + (size_t)(y*outputWidth + lpad), src + (size_t)(iy*inputWidth + ix + lpad), sizeof(T)*(outputWidth - rpad - lpad));
if (rpad > 0) memset(dst + y*outputWidth + outputWidth - rpad, 0, sizeof(T)*rpad);
}
}
else {
for (x = 0; x<outputWidth; x++) {
ix = (__int64)(x*dW - padW + kw);
if (ix < 0 || ix >= inputWidth)
memset(dst + (size_t)(y*outputWidth + x), 0, sizeof(T) * 1);
else
memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix), sizeof(T)*(1));
}
}
}
}
}
else {
for (y = 0; y < outputHeight; y++) {
iy = (__int64)(y*dH + kh);
ix = (__int64)(0 + kw);
if (dW == 1)
memcpy(dst + (size_t)(y*outputWidth), src + (size_t)(iy*inputWidth + ix), sizeof(T)*outputWidth);
else {
for (x = 0; x<outputWidth; x++)
memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix + x*dW), sizeof(T)*(1));
}
}
}
}
} |
MzXMLHandler.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2016.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Andreas Bertsch $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#ifndef OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#define OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/FORMAT/Base64.h>
#include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h>
#include <OpenMS/FORMAT/HANDLERS/XMLHandler.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <stack>
namespace OpenMS
{
class MetaInfoInterface;
namespace Internal
{
/**
@brief XML handlers for MzXMLFile
MapType has to be a MSExperiment or have the same interface.
Do not use this class. It is only needed in MzXMLFile.
*/
template <typename MapType>
class MzXMLHandler :
public XMLHandler
{
public:
/**@name Constructors and destructor */
//@{
/// Constructor for a read-only handler
MzXMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) :
XMLHandler(filename, version),
exp_(&exp),
cexp_(0),
decoder_(),
nesting_level_(0),
skip_spectrum_(false),
spec_write_counter_(1),
consumer_(NULL),
scan_count_(0),
logger_(logger)
{
init_();
}
/// Constructor for a write-only handler
MzXMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) :
XMLHandler(filename, version),
exp_(0),
cexp_(&exp),
decoder_(),
nesting_level_(0),
skip_spectrum_(false),
spec_write_counter_(1),
consumer_(NULL),
scan_count_(0),
logger_(logger)
{
init_();
}
/// Destructor
virtual ~MzXMLHandler() {}
//@}
// Docu in base class
virtual void endElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname);
// Docu in base class
virtual void startElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname, const xercesc::Attributes& attributes);
// Docu in base class
virtual void characters(const XMLCh* const chars, const XMLSize_t length);
/// Write the contents to a stream
void writeTo(std::ostream& os);
/// Sets the options
void setOptions(const PeakFileOptions& options)
{
options_ = options;
}
///Gets the scan count
UInt getScanCount()
{
return scan_count_;
}
/// Set the IMSDataConsumer consumer which will consume the read data
void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType> * consumer)
{
consumer_ = consumer;
}
private:
/// initialize members (call from C'tor)
void init_()
{
cv_terms_.resize(6);
//Polarity
String("any;+;-").split(';', cv_terms_[0]);
//Scan type
// no longer used; cv_terms_[1] remains empty
//Ionization method
String(";ESI;EI;CI;FAB;;;;;;;;;;;;;APCI;;;;;;;;MALDI").split(';', cv_terms_[2]);
cv_terms_[2].resize(IonSource::SIZE_OF_IONIZATIONMETHOD);
//Mass analyzer
String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';', cv_terms_[3]);
cv_terms_[3].resize(MassAnalyzer::SIZE_OF_ANALYZERTYPE);
//Detector
String(";EMT;;;Faraday Cup;;;;;Channeltron;Daly;Microchannel plate").split(';', cv_terms_[4]);
cv_terms_[4].resize(IonDetector::SIZE_OF_TYPE);
//Resolution method
String(";FWHM;TenPercentValley;Baseline").split(';', cv_terms_[5]);
cv_terms_[5].resize(MassAnalyzer::SIZE_OF_RESOLUTIONMETHOD);
/* // OLD:
cv_terms_.resize(6);
//Polarity
String("any;+;-").split(';',cv_terms_[0]);
//Scan type
// is no longer used cv_terms_[1] is empty now
//Ionization method
String(";ESI;EI;CI;FAB;TSP;MALDI;FD;FI;PD;SI;TI;API;ISI;CID;CAD;HN;APCI;APPI;ICP").split(';',cv_terms_[2]);
//Mass analyzer
String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';',cv_terms_[3]);
//Detector
String(";EMT;Daly;;Faraday Cup;;;;Channeltron").split(';',cv_terms_[4]);
//Resolution method
String(";FWHM;TenPercentValley;Baseline").split(';',cv_terms_[5]);
*/
}
protected:
/// Peak type
typedef typename MapType::PeakType PeakType;
/// Spectrum type
typedef MSSpectrum<PeakType> SpectrumType;
/// map pointer for reading
MapType* exp_;
/// map pointer for writing
const MapType* cexp_;
/// Options for loading and storing
PeakFileOptions options_;
/**@name temporary data structures to hold parsed data */
//@{
Base64 decoder_;
Int nesting_level_;
/**
@brief Data necessary to generate a single spectrum
Small struct holds all data necessary to populate a spectrum at a
later timepoint (since reading of the base64 data and generation of
spectra can be done at distinct timepoints).
*/
struct SpectrumData
{
UInt peak_count_;
String precision_;
String compressionType_;
String char_rest_;
SpectrumType spectrum;
bool skip_data;
};
/// Vector of spectrum data stored for later parallel processing
std::vector< SpectrumData > spectrum_data_;
//@}
/// Flag that indicates whether this spectrum should be skipped (due to options)
bool skip_spectrum_;
/// spectrum counter (spectra without peaks are not written)
UInt spec_write_counter_;
/// Consumer class to work on spectra
Interfaces::IMSDataConsumer<MapType>* consumer_;
/// Number of scans read so far
UInt scan_count_;
/// Progress logging class
const ProgressLogger& logger_;
/// write metaInfo to xml (usually in nameValue-tag)
inline void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, int indent = 4, String tag = "nameValue")
{
std::vector<String> keys; // Vector to hold keys to meta info
meta.getKeys(keys);
for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
{
if ((*it)[0] != '#') // internally used meta info start with '#'
{
os << String(indent, '\t') << "<" << tag << " name=\"" << *it << "\" value=\"" << writeXMLEscape(meta.getMetaValue(*it)) << "\"/>\n";
}
}
}
/// data processing auxiliary variable
std::vector< boost::shared_ptr< DataProcessing> > data_processing_;
/**
@brief Fill a single spectrum with data from input
@note Do not modify any internal state variables of the class since
this function will be executed in parallel.
*/
void doPopulateSpectraWithData_(SpectrumData & spectrum_data)
{
typedef typename SpectrumType::PeakType PeakType;
//std::cout << "reading scan" << "\n";
if (spectrum_data.char_rest_ == "") // no peaks
{
return;
}
//remove whitespaces from binary data
//this should not be necessary, but line breaks inside the base64 data are unfortunately common
spectrum_data.char_rest_.removeWhitespaces();
if (spectrum_data.precision_ == "64")
{
std::vector<double> data;
if (spectrum_data.compressionType_ == "zlib")
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
}
else
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
}
spectrum_data.char_rest_ = "";
PeakType peak;
//push_back the peaks into the container
for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
{
// check if peak is in the specified m/z and intensity range
if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
&& (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
{
peak.setMZ(data[n]);
peak.setIntensity(data[n + 1]);
spectrum_data.spectrum.push_back(peak);
}
}
}
else //precision 32
{
std::vector<float> data;
if (spectrum_data.compressionType_ == "zlib")
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
}
else
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
}
spectrum_data.char_rest_ = "";
PeakType peak;
//push_back the peaks into the container
for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
{
if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
&& (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
{
peak.setMZ(data[n]);
peak.setIntensity(data[n + 1]);
spectrum_data.spectrum.push_back(peak);
}
}
}
}
/**
@brief Populate all spectra on the stack with data from input
Will populate all spectra on the current work stack with data (using
multiple threads if available) and append them to the result.
*/
void populateSpectraWithData_()
{
// Whether spectrum should be populated with data
if (options_.getFillData())
{
size_t errCount = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++)
{
// parallel exception catching and re-throwing business
if (!errCount) // no need to parse further if already an error was encountered
{
try
{
doPopulateSpectraWithData_(spectrum_data_[i]);
if (options_.getSortSpectraByMZ() && !spectrum_data_[i].spectrum.isSorted())
{
spectrum_data_[i].spectrum.sortByPosition();
}
}
catch (...)
{
#pragma omp critical(HandleException)
++errCount;
}
}
}
if (errCount != 0)
{
throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data.");
}
}
// Append all spectra
for (Size i = 0; i < spectrum_data_.size(); i++)
{
if (consumer_ != NULL)
{
consumer_->consumeSpectrum(spectrum_data_[i].spectrum);
if (options_.getAlwaysAppendData())
{
exp_->addSpectrum(spectrum_data_[i].spectrum);
}
}
else
{
exp_->addSpectrum(spectrum_data_[i].spectrum);
}
}
// Delete batch
spectrum_data_.clear();
}
private:
/// Not implemented
MzXMLHandler();
static const XMLCh* s_value_;
static const XMLCh* s_count_;
static const XMLCh* s_type_;
static const XMLCh* s_name_;
static const XMLCh* s_version_;
static const XMLCh* s_filename_;
static const XMLCh* s_filetype_;
static const XMLCh* s_filesha1_;
static const XMLCh* s_completiontime_;
static const XMLCh* s_precision_;
static const XMLCh* s_byteorder_;
static const XMLCh* s_pairorder_;
static const XMLCh* s_compressionType_;
static const XMLCh* s_precursorintensity_;
static const XMLCh* s_precursorcharge_;
static const XMLCh* s_windowwideness_;
static const XMLCh* s_mslevel_;
static const XMLCh* s_peakscount_;
static const XMLCh* s_polarity_;
static const XMLCh* s_scantype_;
static const XMLCh* s_filterline_;
static const XMLCh* s_retentiontime_;
static const XMLCh* s_startmz_;
static const XMLCh* s_endmz_;
static const XMLCh* s_first_;
static const XMLCh* s_last_;
static const XMLCh* s_phone_;
static const XMLCh* s_email_;
static const XMLCh* s_uri_;
static const XMLCh* s_num_;
static const XMLCh* s_intensitycutoff_;
static const XMLCh* s_centroided_;
static const XMLCh* s_deisotoped_;
static const XMLCh* s_chargedeconvoluted_;
// init all static members; needed because their undefined initialization order would otherwise cause problems
void initStaticMembers_()
{
static bool init(false);
if (!init)
{
s_value_ = xercesc::XMLString::transcode("value");
s_count_ = xercesc::XMLString::transcode("scanCount");
s_type_ = xercesc::XMLString::transcode("type");
s_name_ = xercesc::XMLString::transcode("name");
s_version_ = xercesc::XMLString::transcode("version");
s_filename_ = xercesc::XMLString::transcode("fileName");
s_filetype_ = xercesc::XMLString::transcode("fileType");
s_filesha1_ = xercesc::XMLString::transcode("fileSha1");
s_completiontime_ = xercesc::XMLString::transcode("completionTime");
s_precision_ = xercesc::XMLString::transcode("precision");
s_byteorder_ = xercesc::XMLString::transcode("byteOrder");
s_pairorder_ = xercesc::XMLString::transcode("pairOrder");
s_compressionType_ = xercesc::XMLString::transcode("compressionType");
s_precursorintensity_ = xercesc::XMLString::transcode("precursorIntensity");
s_precursorcharge_ = xercesc::XMLString::transcode("precursorCharge");
s_windowwideness_ = xercesc::XMLString::transcode("windowWideness");
s_mslevel_ = xercesc::XMLString::transcode("msLevel");
s_peakscount_ = xercesc::XMLString::transcode("peaksCount");
s_polarity_ = xercesc::XMLString::transcode("polarity");
s_scantype_ = xercesc::XMLString::transcode("scanType");
s_filterline_ = xercesc::XMLString::transcode("filterLine");
s_retentiontime_ = xercesc::XMLString::transcode("retentionTime");
s_startmz_ = xercesc::XMLString::transcode("startMz");
s_endmz_ = xercesc::XMLString::transcode("endMz");
s_first_ = xercesc::XMLString::transcode("first");
s_last_ = xercesc::XMLString::transcode("last");
s_phone_ = xercesc::XMLString::transcode("phone");
s_email_ = xercesc::XMLString::transcode("email");
s_uri_ = xercesc::XMLString::transcode("URI");
s_num_ = xercesc::XMLString::transcode("num");
s_intensitycutoff_ = xercesc::XMLString::transcode("intensityCutoff");
s_centroided_ = xercesc::XMLString::transcode("centroided");
s_deisotoped_ = xercesc::XMLString::transcode("deisotoped");
s_chargedeconvoluted_ = xercesc::XMLString::transcode("chargeDeconvoluted");
init = true;
}
return;
}
};
//--------------------------------------------------------------------------------
// this cannot be moved into a function as VS2008 does not allow more than 31 static members in a function .. don't ask...
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_value_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_count_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_type_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_name_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_version_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filename_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filetype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filesha1_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_completiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precision_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_byteorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_pairorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_compressionType_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorintensity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorcharge_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_windowwideness_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_mslevel_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_peakscount_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_polarity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_scantype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filterline_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_retentiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_startmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_endmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_first_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_last_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_phone_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_email_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_uri_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_num_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_intensitycutoff_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_centroided_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_deisotoped_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_chargedeconvoluted_ = 0;
template <typename MapType>
void MzXMLHandler<MapType>::startElement(const XMLCh* const /*uri*/,
const XMLCh* const /*local_name*/, const XMLCh* const qname,
const xercesc::Attributes& attributes)
{
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
static bool init_static_members(false);
if (!init_static_members)
{
initStaticMembers_();
}
String tag = sm_.convert(qname);
open_tags_.push_back(tag);
//std::cout << " -- Start -- "<< tag << " -- " << "\n";
//Skip all tags until the next scan
if (skip_spectrum_ && tag != "scan")
return;
if (tag == "msRun")
{
Int count = 0;
optionalAttributeAsInt_(count, attributes, s_count_);
exp_->reserve(count);
logger_.startProgress(0, count, "loading mzXML file");
scan_count_ = 0;
data_processing_.clear();
//start and end time are xs:duration. This makes no sense => ignore them
}
else if (tag == "parentFile")
{
SourceFile sf;
sf.setNameOfFile(attributeAsString_(attributes, s_filename_));
sf.setFileType(attributeAsString_(attributes, s_filetype_));
sf.setChecksum(attributeAsString_(attributes, s_filesha1_), SourceFile::SHA1);
exp_->getSourceFiles().push_back(sf);
}
else if (tag == "software")
{
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "dataProcessing")
{
data_processing_.back()->getSoftware().setVersion(attributeAsString_(attributes, s_version_));
data_processing_.back()->getSoftware().setName(attributeAsString_(attributes, s_name_));
data_processing_.back()->setMetaValue("#type", String(attributeAsString_(attributes, s_type_)));
String time;
optionalAttributeAsString_(time, attributes, s_completiontime_);
data_processing_.back()->setCompletionTime(asDateTime_(time));
}
else if (parent_tag == "msInstrument")
{
exp_->getInstrument().getSoftware().setVersion(attributeAsString_(attributes, s_version_));
exp_->getInstrument().getSoftware().setName(attributeAsString_(attributes, s_name_));
}
}
else if (tag == "peaks")
{
//precision
spectrum_data_.back().precision_ = "32";
optionalAttributeAsString_(spectrum_data_.back().precision_, attributes, s_precision_);
if (spectrum_data_.back().precision_ != "32" && spectrum_data_.back().precision_ != "64")
{
error(LOAD, String("Invalid precision '") + spectrum_data_.back().precision_ + "' in element 'peaks'");
}
//byte order
String byte_order = "network";
optionalAttributeAsString_(byte_order, attributes, s_byteorder_);
if (byte_order != "network")
{
error(LOAD, String("Invalid or missing byte order '") + byte_order + "' in element 'peaks'. Must be 'network'!");
}
//pair order
String pair_order = "m/z-int";
optionalAttributeAsString_(pair_order, attributes, s_pairorder_);
if (pair_order != "m/z-int")
{
error(LOAD, String("Invalid or missing pair order '") + pair_order + "' in element 'peaks'. Must be 'm/z-int'!");
}
//compressionType
spectrum_data_.back().compressionType_ = "none";
optionalAttributeAsString_(spectrum_data_.back().compressionType_, attributes, s_compressionType_);
if (spectrum_data_.back().compressionType_ != "none" && spectrum_data_.back().compressionType_ != "zlib")
{
error(LOAD, String("Invalid compression type ") + spectrum_data_.back().compressionType_ + "in elements 'peaks'. Must be 'none' or 'zlib'! ");
}
}
else if (tag == "precursorMz")
{
//add new precursor
spectrum_data_.back().spectrum.getPrecursors().push_back(Precursor());
//intensity
try
{
spectrum_data_.back().spectrum.getPrecursors().back().setIntensity(attributeAsDouble_(attributes, s_precursorintensity_));
}
catch (Exception::ParseError& /*e*/)
{
error(LOAD, "Mandatory attribute 'precursorIntensity' of tag 'precursorMz' not found! Setting precursor intensity to zero!");
}
//charge
Int charge = 0;
if (optionalAttributeAsInt_(charge, attributes, s_precursorcharge_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setCharge(charge);
}
//window bounds (here only the width is stored in both fields - this is corrected when we parse the m/z position)
double window = 0.0;
if (optionalAttributeAsDouble_(window, attributes, s_windowwideness_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(window);
}
}
else if (tag == "scan")
{
skip_spectrum_ = false;
nesting_level_++;
if (options_.getMetadataOnly())
throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__);
// check if the scan is in the desired MS / RT range
UInt ms_level = attributeAsInt_(attributes, s_mslevel_);
if (ms_level == 0)
{
warning(LOAD, String("Invalid 'msLevel' attribute with value '0' in 'scan' element found. Assuming ms level 1!"));
ms_level = 1;
}
//parse retention time and convert it from xs:duration to seconds
double retention_time = 0.0;
String time_string = "";
if (optionalAttributeAsString_(time_string, attributes, s_retentiontime_))
{
time_string = time_string.suffix('T');
//std::cout << "Initial trim: " << time_string << "\n";
if (time_string.has('H'))
{
retention_time += 3600 * asDouble_(time_string.prefix('H'));
time_string = time_string.suffix('H');
//std::cout << "After H: " << time_string << "\n";
}
if (time_string.has('M'))
{
retention_time += 60 * asDouble_(time_string.prefix('M'));
time_string = time_string.suffix('M');
//std::cout << "After M: " << time_string << "\n";
}
if (time_string.has('S'))
{
retention_time += asDouble_(time_string.prefix('S'));
time_string = time_string.suffix('S');
//std::cout << "After S: " << time_string << "\n";
}
}
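//e.g. (hypothetical) retentionTime="PT1H2M3.5S" yields 3600*1 + 60*2 + 3.5 = 3723.5 seconds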
logger_.setProgress(scan_count_);
if ((options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(retention_time)))
|| (options_.hasMSLevels() && !options_.containsMSLevel(ms_level))
|| options_.getSizeOnly())
{
// skip this tag
skip_spectrum_ = true;
++scan_count_;
return;
}
// Add a new spectrum, initialize and set MS level and RT
spectrum_data_.resize(spectrum_data_.size() + 1); // TODO !!
spectrum_data_.back().peak_count_ = 0;
spectrum_data_.back().spectrum.setMSLevel(ms_level);
spectrum_data_.back().spectrum.setRT(retention_time);
spectrum_data_.back().spectrum.setNativeID(String("scan=") + attributeAsString_(attributes, s_num_));
//peaksCount gives the number of (m/z, intensity) pairs; the decoded array holds twice as many values
spectrum_data_.back().peak_count_ = attributeAsInt_(attributes, s_peakscount_);
spectrum_data_.back().spectrum.reserve(spectrum_data_.back().peak_count_ / 2 + 1);
spectrum_data_.back().spectrum.setDataProcessing(data_processing_);
//centroided, chargeDeconvoluted, deisotoped, collisionEnergy are ignored
//other optional attributes
ScanWindow window;
optionalAttributeAsDouble_(window.begin, attributes, s_startmz_);
optionalAttributeAsDouble_(window.end, attributes, s_endmz_);
if (window.begin != 0.0 || window.end != 0.0)
{
spectrum_data_.back().spectrum.getInstrumentSettings().getScanWindows().push_back(window);
}
String polarity = "any";
optionalAttributeAsString_(polarity, attributes, s_polarity_);
spectrum_data_.back().spectrum.getInstrumentSettings().setPolarity((IonSource::Polarity) cvStringToEnum_(0, polarity, "polarity"));
// Filter string (see CV term MS:1000512 in mzML)
String filterLine = "";
optionalAttributeAsString_(filterLine, attributes, s_filterline_);
if (!filterLine.empty())
{
spectrum_data_.back().spectrum.setMetaValue("filter string", filterLine);
}
String type = "";
optionalAttributeAsString_(type, attributes, s_scantype_);
if (type == "")
{
//unknown/unset => do nothing here => no warning in the end
}
else if (type == "zoom")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Full")
{
if (ms_level > 1)
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM);
else
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "SIM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SIM);
}
else if (type == "SRM" || type == "MRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SRM);
}
else if (type == "CRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::CRM);
}
else if (type == "Q1")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Q3")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EMS") //Non-standard type: Enhanced MS (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EPI") //Non-standard type: Enhanced Product Ion (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
spectrum_data_.back().spectrum.setMSLevel(2);
}
else if (type == "ER") // Non-standard type: Enhanced Resolution (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
warning(LOAD, String("Unknown scan mode '") + type + "'. Assuming full scan");
}
++scan_count_;
}
else if (tag == "operator")
{
exp_->getContacts().resize(1);
exp_->getContacts().back().setFirstName(attributeAsString_(attributes, s_first_));
exp_->getContacts().back().setLastName(attributeAsString_(attributes, s_last_));
String tmp = "";
optionalAttributeAsString_(tmp, attributes, s_email_);
exp_->getContacts().back().setEmail(tmp);
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_phone_);
if (tmp != "")
{
exp_->getContacts().back().setMetaValue("#phone", tmp);
}
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_uri_);
exp_->getContacts().back().setURL(tmp);
}
else if (tag == "msManufacturer")
{
exp_->getInstrument().setVendor(attributeAsString_(attributes, s_value_));
}
else if (tag == "msModel")
{
exp_->getInstrument().setModel(attributeAsString_(attributes, s_value_));
}
else if (tag == "msIonisation")
{
exp_->getInstrument().getIonSources().resize(1);
exp_->getInstrument().getIonSources()[0].setIonizationMethod((IonSource::IonizationMethod) cvStringToEnum_(2, attributeAsString_(attributes, s_value_), "msIonization"));
}
else if (tag == "msMassAnalyzer")
{
exp_->getInstrument().getMassAnalyzers().resize(1);
exp_->getInstrument().getMassAnalyzers()[0].setType((MassAnalyzer::AnalyzerType) cvStringToEnum_(3, attributeAsString_(attributes, s_value_), "msMassAnalyzer"));
}
else if (tag == "msDetector")
{
exp_->getInstrument().getIonDetectors().resize(1);
exp_->getInstrument().getIonDetectors()[0].setType((IonDetector::Type) cvStringToEnum_(4, attributeAsString_(attributes, s_value_), "msDetector"));
}
else if (tag == "msResolution")
{
exp_->getInstrument().getMassAnalyzers()[0].setResolutionMethod((MassAnalyzer::ResolutionMethod) cvStringToEnum_(5, attributeAsString_(attributes, s_value_), "msResolution"));
}
else if (tag == "dataProcessing")
{
data_processing_.push_back( DataProcessingPtr(new DataProcessing));
String boolean = "";
optionalAttributeAsString_(boolean, attributes, s_deisotoped_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::DEISOTOPING);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_chargedeconvoluted_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION);
}
double cutoff = 0.0;
optionalAttributeAsDouble_(cutoff, attributes, s_intensitycutoff_);
if (cutoff != 0.0)
{
data_processing_.back()->setMetaValue("#intensity_cutoff", cutoff);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_centroided_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::PEAK_PICKING);
}
}
else if (tag == "nameValue")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue(name, value);
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setMetaValue(name, value);
}
else
{
std::cout << " Warning: Unexpected tag 'nameValue' in tag '" << parent_tag << "'" << "\n";
}
}
else if (tag == "processingOperation")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
data_processing_.back()->setMetaValue(name, value);
}
//std::cout << " -- !Start -- " << "\n";
}
template <typename MapType>
void MzXMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname)
{
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
//std::cout << " -- End -- " << sm_.convert(qname) << " -- " << "\n";
static const XMLCh* s_mzxml = xercesc::XMLString::transcode("mzXML");
static const XMLCh* s_scan = xercesc::XMLString::transcode("scan");
open_tags_.pop_back();
if (equal_(qname, s_mzxml))
{
// Flush the remaining data
populateSpectraWithData_();
// End of mzXML
logger_.endProgress();
}
else if (equal_(qname, s_scan))
{
// End of scan: go up one nesting level
// Check whether to populate spectra when on highest nesting level
nesting_level_--;
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
if (nesting_level_ == 0 && spectrum_data_.size() >= options_.getMaxDataPoolSize())
{
populateSpectraWithData_();
}
}
//std::cout << " -- End -- " << "\n";
sm_.clear();
}
template <typename MapType>
void MzXMLHandler<MapType>::characters(const XMLCh* const chars, const XMLSize_t length)
{
//Abort if this spectrum should be skipped
if (skip_spectrum_)
return;
if (open_tags_.back() == "peaks")
{
//chars may be split to several chunks => concatenate them
if (options_.getFillData())
{
// Since we convert a Base64 string here, it can only contain plain ASCII
sm_.appendASCII(chars, length, spectrum_data_.back().char_rest_);
}
}
else if (open_tags_.back() == "offset" || open_tags_.back() == "indexOffset" || open_tags_.back() == "sha1")
{
}
else if (open_tags_.back() == "precursorMz")
{
char* transcoded_chars = sm_.convert(chars);
double mz_pos = asDouble_(transcoded_chars);
//precursor m/z
spectrum_data_.back().spectrum.getPrecursors().back().setMZ(mz_pos);
//update window bounds - center them around the m/z pos
double window_width = spectrum_data_.back().spectrum.getPrecursors().back().getIsolationWindowLowerOffset();
if (window_width != 0.0)
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(0.5 * window_width);
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowUpperOffset(0.5 * window_width);
}
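//e.g. (hypothetical) windowWideness="4.0" with <precursorMz>500.0</precursorMz>
//yields an isolation window of [498.0, 502.0]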
}
else if (open_tags_.back() == "comment")
{
char* transcoded_chars = sm_.convert(chars);
String parent_tag = *(open_tags_.end() - 2);
//std::cout << "- Comment of parent " << parent_tag << "\n";
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue("#comment", String(transcoded_chars));
}
else if (parent_tag == "dataProcessing")
{
//this is currently ignored
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setComment(transcoded_chars);
}
else if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled comment '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
else
{
char* transcoded_chars = sm_.convert(chars);
if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled character content '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
}
template <typename MapType>
void MzXMLHandler<MapType>::writeTo(std::ostream& os)
{
//determine how many spectra there are (count only those with peaks)
UInt count_tmp_ = 0;
for (Size s = 0; s < cexp_->size(); s++)
{
const SpectrumType& spec = (*cexp_)[s];
if (spec.size() != 0)
++count_tmp_;
}
if (count_tmp_ == 0)
++count_tmp_;
logger_.startProgress(0, cexp_->size(), "storing mzXML file");
os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n"
<< "<mzXML xmlns=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1\" "
<< "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
<< "xsi:schemaLocation=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1 "
<< "http://sashimi.sourceforge.net/schema_revision/mzXML_2.1/mzXML_idx_2.1.xsd\">\n"
<< "\t<msRun scanCount=\"" << count_tmp_ << "\">\n";
//----------------------------------------------------------------------------------------
// parent files
//----------------------------------------------------------------------------------------
if (cexp_->getSourceFiles().empty())
{
os << "\t\t<parentFile fileName=\"\" fileType=\"processedData\" fileSha1=\"0000000000000000000000000000000000000000\"/>\n";
}
else
{
for (Size i = 0; i < cexp_->getSourceFiles().size(); ++i)
{
const SourceFile& sf = cexp_->getSourceFiles()[i];
os << "\t\t<parentFile fileName=\"" << sf.getNameOfFile() << "\" fileType=\"";
//file type is an enum in mzXML => search for 'raw' string
String tmp_string = sf.getFileType();
tmp_string.toLower();
if (tmp_string.hasSubstring("raw"))
{
os << "RAWData";
}
else
{
os << "processedData";
}
//Sha1 checksum must have 40 characters => create a fake if it is unknown
os << "\" fileSha1=\"";
tmp_string = sf.getChecksum();
if (sf.getChecksum().size() != 40 || sf.getChecksumType() != SourceFile::SHA1)
{
os << "0000000000000000000000000000000000000000";
}
else
{
os << sf.getChecksum();
}
os << "\"/>\n";
}
}
//----------------------------------------------------------------------------------------
//instrument
//----------------------------------------------------------------------------------------
if (cexp_->getInstrument() != Instrument() || cexp_->getContacts().size() != 0)
{
const Instrument& inst = cexp_->getInstrument();
os << "\t\t<msInstrument>\n"
<< "\t\t\t<msManufacturer category=\"msManufacturer\" value=\"" << inst.getVendor() << "\"/>\n" << "\t\t\t<msModel category=\"msModel\" value=\"" << inst.getModel() << "\"/>\n";
if (inst.getIonSources().empty() || !inst.getIonSources()[0].getIonizationMethod())
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"" << cv_terms_[2][inst.getIonSources()[0].getIonizationMethod()] << "\"/>\n";
}
const std::vector<MassAnalyzer>& analyzers = inst.getMassAnalyzers();
if (analyzers.empty() || !analyzers[0].getType())
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"" << cv_terms_[3][analyzers[0].getType()] << "\"/>\n";
}
if (inst.getIonDetectors().empty() || !inst.getIonDetectors()[0].getType())
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"" << cv_terms_[4][inst.getIonDetectors()[0].getType()] << "\"/>\n";
}
os << "\t\t\t<software type=\"acquisition\" name=\"" << inst.getSoftware().getName() << "\" version=\"" << inst.getSoftware().getVersion() << "\"/>\n";
if (analyzers.empty() || !analyzers[0].getResolutionMethod())
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"" << cv_terms_[5][analyzers[0].getResolutionMethod()] << "\"/>\n";
}
if (cexp_->getContacts().size() > 0)
{
const ContactPerson& cont = cexp_->getContacts()[0];
os << "\t\t\t<operator first=\"" << cont.getFirstName() << "\" last=\"" << cont.getLastName() << "\"";
if (cont.getEmail() != "")
{
os << " email=\"" << cont.getEmail() << "\"";
}
if (cont.getURL() != "")
{
os << " URI=\"" << cont.getURL() << "\"";
}
if (cont.metaValueExists("#phone"))
{
os << " phone=\"" << writeXMLEscape(cont.getMetaValue("#phone").toString()) << "\"";
}
os << "/>\n";
}
writeUserParam_(os, inst, 3);
if (inst.metaValueExists("#comment"))
{
os << "\t\t\t<comment>" << writeXMLEscape(inst.getMetaValue("#comment")) << "</comment>\n";
}
os << "\t\t</msInstrument>\n";
}
//----------------------------------------------------------------------------------------
//data processing (the information of the first spectrum is assigned to the whole file)
//----------------------------------------------------------------------------------------
if (cexp_->size() == 0 || (*cexp_)[0].getDataProcessing().empty())
{
os << "\t\t<dataProcessing>\n"
<< "\t\t\t<software type=\"processing\" name=\"\" version=\"\"/>\n"
<< "\t\t</dataProcessing>\n";
}
else
{
for (Size i = 0; i < (*cexp_)[0].getDataProcessing().size(); ++i)
{
const DataProcessing& data_processing = * (*cexp_)[0].getDataProcessing()[i].get();
os << "\t\t<dataProcessing deisotoped=\""
<< data_processing.getProcessingActions().count(DataProcessing::DEISOTOPING)
<< "\" chargeDeconvoluted=\""
<< data_processing.getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION)
<< "\" centroided=\""
<< data_processing.getProcessingActions().count(DataProcessing::PEAK_PICKING)
<< "\"";
if (data_processing.metaValueExists("#intensity_cutoff"))
{
os << " intensityCutoff=\"" << writeXMLEscape(data_processing.getMetaValue("#intensity_cutoff").toString()) << "\"";
}
os << ">\n"
<< "\t\t\t<software type=\"";
if (data_processing.metaValueExists("#type"))
{
os << writeXMLEscape(data_processing.getMetaValue("#type").toString());
}
else
{
os << "processing";
}
os << "\" name=\"" << data_processing.getSoftware().getName()
<< "\" version=\"" << data_processing.getSoftware().getVersion();
if (data_processing.getCompletionTime() != DateTime())
{
os << "\" completionTime=\"" << data_processing.getCompletionTime().get().substitute(' ', 'T');
}
os << "\"/>\n";
writeUserParam_(os, data_processing, 3, "processingOperation");
os << "\t\t</dataProcessing>\n";
}
}
//check if the nativeIDs of all spectra are numbers or numbers prefixed with 'scan='
//If not, we need to renumber all spectra.
bool all_numbers = true;
bool all_empty = true;
bool all_prefixed_numbers = true;
for (Size s = 0; s < cexp_->size(); s++)
{
String native_id = (*cexp_)[s].getNativeID();
if (!native_id.hasPrefix("scan="))
{
all_prefixed_numbers = false;
}
else
{
native_id = native_id.substr(5);
}
try
{
native_id.toInt();
}
catch (Exception::ConversionError&)
{
all_numbers = false;
all_prefixed_numbers = false;
if (native_id != "")
{
all_empty = false;
}
}
}
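//e.g. (hypothetical) IDs "scan=1", "scan=2" keep all_prefixed_numbers true;
//"1", "2" keep only all_numbers true; an ID like "sample_A" forces renumbering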
//If we need to renumber and the nativeIDs were not empty, warn the user
if (!all_numbers && !all_empty)
{
warning(STORE, "Not all spectrum native IDs are numbers or correctly prefixed with 'scan='. The spectra are renumbered and the native IDs are lost!");
}
// write scans
std::stack<UInt> open_scans;
for (Size s = 0; s < cexp_->size(); s++)
{
logger_.setProgress(s);
const SpectrumType& spec = (*cexp_)[s];
UInt ms_level = spec.getMSLevel();
open_scans.push(ms_level);
Size spectrum_id = s + 1;
if (all_prefixed_numbers)
{
spectrum_id = spec.getNativeID().substr(5).toInt();
}
else if (all_numbers)
{
spectrum_id = spec.getNativeID().toInt();
}
os << String(ms_level + 1, '\t')
<< "<scan num=\"" << spectrum_id << "\" msLevel=\""
<< ms_level << "\" peaksCount=\""
<< spec.size() << "\" polarity=\"";
if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE)
{
os << "+";
}
else if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE)
{
os << "-";
}
else
{
os << "any";
}
//scan type
switch (spec.getInstrumentSettings().getScanMode())
{
case InstrumentSettings::UNKNOWN:
break;
case InstrumentSettings::MASSSPECTRUM:
case InstrumentSettings::MS1SPECTRUM:
case InstrumentSettings::MSNSPECTRUM:
if (spec.getInstrumentSettings().getZoomScan())
{
os << "\" scanType=\"zoom";
}
else
{
os << "\" scanType=\"Full";
}
break;
case InstrumentSettings::SIM:
os << "\" scanType=\"SIM";
break;
case InstrumentSettings::SRM:
os << "\" scanType=\"SRM";
break;
case InstrumentSettings::CRM:
os << "\" scanType=\"CRM";
break;
default:
os << "\" scanType=\"Full";
warning(STORE, String("Scan type '") + InstrumentSettings::NamesOfScanMode[spec.getInstrumentSettings().getScanMode()] + "' not supported by mzXML. Using 'Full' scan mode!");
}
// filter line
if (spec.metaValueExists("filter string") )
{
os << "\" filterLine=\"";
os << writeXMLEscape ( (String)spec.getMetaValue("filter string") );
}
// base peak mz (used by some programs like MAVEN), according to xsd:
// "m/z of the base peak (most intense peak)"
os << "\" basePeakMz=\"";
double basePeakInt = 0;
double basePeakMz = 0;
for (Size j = 0; j < spec.size(); j++)
{
if (spec[j].getIntensity() > basePeakInt)
{
basePeakInt = spec[j].getIntensity();
basePeakMz = spec[j].getMZ();
}
}
os << basePeakMz;
// retention time
os << "\" retentionTime=\"";
if (spec.getRT() < 0)
os << "-";
os << "PT" << std::fabs(spec.getRT()) << "S\"";
if (!spec.getInstrumentSettings().getScanWindows().empty())
{
os << " startMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].begin << "\" endMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].end << "\"";
}
if (spec.getInstrumentSettings().getScanWindows().size() > 1)
{
warning(STORE, "The MzXML format can store only one scan window for each scan. Only the first one is stored!");
}
// end of "scan" attributes
os << ">\n";
for (Size i = 0; i < spec.getPrecursors().size(); ++i)
{
const Precursor& precursor = spec.getPrecursors()[i];
//intensity
os << String(ms_level + 2, '\t') << "<precursorMz precursorIntensity=\"" << precursor.getIntensity();
//charge
if (precursor.getCharge() != 0)
os << "\" precursorCharge=\"" << precursor.getCharge();
//window size
if (precursor.getIsolationWindowLowerOffset() + precursor.getIsolationWindowUpperOffset() > 0.0)
os << "\" windowWideness=\"" << (precursor.getIsolationWindowUpperOffset() + precursor.getIsolationWindowLowerOffset());
//m/z
os << "\">" << precursor.getMZ() << "</precursorMz>\n";
}
if (!spec.empty())
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\">";
//std::cout << "Writing scan " << s << "\n";
std::vector<float> tmp;
for (Size i = 0; i < spec.size(); i++)
{
tmp.push_back(spec[i].getMZ());
tmp.push_back(spec[i].getIntensity());
}
String encoded;
decoder_.encode(tmp, Base64::BYTEORDER_BIGENDIAN, encoded);
os << encoded << "</peaks>\n";
}
else
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\" xsi:nil=\"true\"/>\n";
}
writeUserParam_(os, spec, ms_level + 2);
if (spec.getComment() != "")
{
os << String(ms_level + 2, '\t') << "<comment>" << spec.getComment() << "</comment>\n";
}
//check MS level of next scan and close scans (scans can be nested)
UInt next_ms_level = 0;
if (s < cexp_->size() - 1)
{
next_ms_level = ((*cexp_)[s + 1]).getMSLevel();
}
//std::cout << "scan: " << s << " this: " << ms_level << " next: " << next_ms_level << "\n";
if (next_ms_level <= ms_level)
{
for (Size i = 0; i <= ms_level - next_ms_level && !open_scans.empty(); ++i)
{
os << String(ms_level - i + 1, '\t') << "</scan>\n";
open_scans.pop();
}
}
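//e.g. (hypothetical) for MS levels 1,2,2,1 the second level-2 scan is
//followed by a level-1 scan, so two </scan> tags are written here to
//unwind the nesting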
}
os << "\t</msRun>\n"
<< "\t<indexOffset>0</indexOffset>\n"
<< "</mzXML>\n";
logger_.endProgress();
spec_write_counter_ = 1;
}
} // namespace Internal
} // namespace OpenMS
#endif
|
triplet_grid.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only developed */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <stdlib.h>
#include "bzgrid.h"
#include "grgrid.h"
#include "lagrid.h"
#include "triplet.h"
#include "triplet_grid.h"
static long get_ir_triplets_at_q(long *map_triplets,
long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal,
const long swappable);
static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets,
const long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal_q,
const long num_ir_q);
static long get_ir_triplets_at_q_noperm(long *map_triplets,
const long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal_q);
static long get_BZ_triplets_at_q(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *map_triplets);
static void get_BZ_triplets_at_q_type1(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir);
static void get_BZ_triplets_at_q_type2(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir);
static double get_squared_distance(const long G[3],
const double LQD_inv[3][3]);
static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid);
static RotMats *get_reciprocal_point_group_with_q(const RotMats *rot_reciprocal,
const long D_diag[3],
const long grid_point);
static RotMats *get_reciprocal_point_group(const long (*rec_rotations_in)[3][3],
const long num_rot,
const long is_time_reversal,
const long is_transpose);
long tpk_get_ir_triplets_at_q(long *map_triplets,
long *map_q,
const long grid_point,
const long D_diag[3],
const long is_time_reversal,
const long (*rec_rotations_in)[3][3],
const long num_rot,
const long swappable)
{
long num_ir;
RotMats *rotations;
rotations = get_reciprocal_point_group(rec_rotations_in,
num_rot,
is_time_reversal,
0);
if (rotations == NULL)
{
return 0;
}
num_ir = get_ir_triplets_at_q(map_triplets,
map_q,
grid_point,
D_diag,
rotations,
swappable);
bzg_free_RotMats(rotations);
rotations = NULL;
return num_ir;
}
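/* Usage sketch (hypothetical sizes): for a 4x4x4 grid, map_triplets and */
/* map_q must each hold 4*4*4 = 64 longs. On return, map_triplets[gp] is */
/* the representative q1 grid point of the triplet (q0 = grid_point,     */
/* q1 = gp, q2 = -q0-q1) and the return value counts the irreducible     */
/* triplets.                                                             */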
long tpk_get_BZ_triplets_at_q(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *map_triplets)
{
return get_BZ_triplets_at_q(triplets,
grid_point,
bzgrid,
map_triplets);
}
static long get_ir_triplets_at_q(long *map_triplets,
long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal,
const long swappable)
{
long i, num_ir_q, num_ir_triplets;
long PS[3];
RotMats *rot_reciprocal_q;
rot_reciprocal_q = NULL;
for (i = 0; i < 3; i++)
{
PS[i] = 0;
}
/* Search irreducible q-points (map_q) with a stabilizer. */
rot_reciprocal_q = get_reciprocal_point_group_with_q(rot_reciprocal,
D_diag,
grid_point);
grg_get_ir_grid_map(map_q,
rot_reciprocal_q->mat,
rot_reciprocal_q->size,
D_diag,
PS);
num_ir_q = 0;
for (i = 0; i < D_diag[0] * D_diag[1] * D_diag[2]; i++)
{
if (map_q[i] == i)
{
num_ir_q++;
}
}
if (swappable)
{
num_ir_triplets = get_ir_triplets_at_q_perm_q1q2(map_triplets,
map_q,
grid_point,
D_diag,
rot_reciprocal_q,
num_ir_q);
}
else
{
num_ir_triplets = get_ir_triplets_at_q_noperm(map_triplets,
map_q,
grid_point,
D_diag,
rot_reciprocal_q);
}
bzg_free_RotMats(rot_reciprocal_q);
rot_reciprocal_q = NULL;
return num_ir_triplets;
}
static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets,
const long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal_q,
const long num_ir_q)
{
long i, j, num_grid, num_ir_triplets, ir_gp, count;
long adrs0[3], adrs1[3], adrs2[3];
long *ir_gps_at_q, *q_2;
ir_gps_at_q = NULL;
q_2 = NULL;
num_ir_triplets = 0;
num_grid = D_diag[0] * D_diag[1] * D_diag[2];
if ((q_2 = (long *)malloc(sizeof(long) * num_ir_q)) == NULL)
{
warning_print("Memory could not be allocated.");
goto ret;
}
if ((ir_gps_at_q = (long *)malloc(sizeof(long) * num_ir_q)) == NULL)
{
warning_print("Memory could not be allocated.");
goto ret;
}
count = 0;
for (i = 0; i < num_grid; i++)
{
if (map_q[i] == i)
{
ir_gps_at_q[count] = i;
count++;
}
}
grg_get_grid_address_from_index(adrs0, grid_point, D_diag);
#ifdef PHPYOPENMP
#pragma omp parallel for private(j, adrs1, adrs2)
#endif
for (i = 0; i < num_ir_q; i++)
{
grg_get_grid_address_from_index(adrs1, ir_gps_at_q[i], D_diag);
for (j = 0; j < 3; j++)
{ /* q'' */
adrs2[j] = -adrs0[j] - adrs1[j];
}
q_2[i] = grg_get_grid_index(adrs2, D_diag);
}
/* map_q[q_2[i]] is in ir_gps_at_q. */
/* If map_q[q_2[i]] < ir_gps_at_q[i], that triplet has already been */
/* stored, so the counter is not incremented. */
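/* Example (hypothetical): with ir_gps_at_q = {0, 5, 9}, if for ir_gp = 9 */
/* the complement q'' satisfies map_q[q_2[i]] = 5 < 9, the triplet at 9   */
/* is the q1<->q2 swap of the one at 5 and is not counted again.          */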
for (i = 0; i < num_ir_q; i++)
{
ir_gp = ir_gps_at_q[i];
if (map_q[q_2[i]] < ir_gp)
{
map_triplets[ir_gp] = map_q[q_2[i]];
}
else
{
map_triplets[ir_gp] = ir_gp;
num_ir_triplets++;
}
}
#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
for (i = 0; i < num_grid; i++)
{
map_triplets[i] = map_triplets[map_q[i]];
}
ret:
if (q_2)
{
free(q_2);
q_2 = NULL;
}
if (ir_gps_at_q)
{
free(ir_gps_at_q);
ir_gps_at_q = NULL;
}
return num_ir_triplets;
}
static long get_ir_triplets_at_q_noperm(long *map_triplets,
const long *map_q,
const long grid_point,
const long D_diag[3],
const RotMats *rot_reciprocal_q)
{
long i, num_grid, num_ir_triplets;
num_ir_triplets = 0;
num_grid = D_diag[0] * D_diag[1] * D_diag[2];
for (i = 0; i < num_grid; i++)
{
if (map_q[i] == i)
{
map_triplets[i] = i;
num_ir_triplets++;
}
else
{
map_triplets[i] = map_triplets[map_q[i]];
}
}
return num_ir_triplets;
}
static long get_BZ_triplets_at_q(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *map_triplets)
{
long i, num_ir;
long *ir_q1_gps;
ir_q1_gps = NULL;
num_ir = 0;
if ((ir_q1_gps = (long *)malloc(sizeof(long) * bzgrid->size)) == NULL)
{
warning_print("Memory could not be allocated.");
goto ret;
}
for (i = 0; i < bzgrid->size; i++)
{
if (map_triplets[i] == i)
{
ir_q1_gps[num_ir] = i;
num_ir++;
}
}
if (bzgrid->type == 1)
{
get_BZ_triplets_at_q_type1(triplets,
grid_point,
bzgrid,
ir_q1_gps,
num_ir);
}
else
{
get_BZ_triplets_at_q_type2(triplets,
grid_point,
bzgrid,
ir_q1_gps,
num_ir);
}
free(ir_q1_gps);
ir_q1_gps = NULL;
ret:
return num_ir;
}
static void get_BZ_triplets_at_q_type1(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir)
{
long i, j, gp2, num_gp, num_bzgp, bz0, bz1, bz2;
long bzgp[3], G[3];
long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3];
const long *gp_map;
const long(*bz_adrs)[3];
double d2, min_d2, tolerance;
double LQD_inv[3][3];
gp_map = bzgrid->gp_map;
bz_adrs = bzgrid->addresses;
get_LQD_inv(LQD_inv, bzgrid);
/* This tolerance is used for consistency with the BZ reduction in bzgrid. */
tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid *)bzgrid);
for (i = 0; i < 3; i++)
{
bz_adrs0[i] = bz_adrs[grid_point][i];
}
num_gp = bzgrid->D_diag[0] * bzgrid->D_diag[1] * bzgrid->D_diag[2];
num_bzgp = num_gp * 8;
#ifdef PHPYOPENMP
#pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, min_d2, bz0, bz1, bz2)
#endif
for (i = 0; i < num_ir; i++)
{
for (j = 0; j < 3; j++)
{
bz_adrs1[j] = bz_adrs[ir_q1_gps[i]][j];
bz_adrs2[j] = -bz_adrs0[j] - bz_adrs1[j];
}
gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag);
/* A negative value signals that min_d2 has not been initialized yet. */
min_d2 = -1;
for (bz0 = 0;
bz0 < gp_map[num_bzgp + grid_point + 1] - gp_map[num_bzgp + grid_point] + 1;
bz0++)
{
if (bz0 == 0)
{
bzgp[0] = grid_point;
}
else
{
bzgp[0] = num_gp + gp_map[num_bzgp + grid_point] + bz0 - 1;
}
for (bz1 = 0;
bz1 < gp_map[num_bzgp + ir_q1_gps[i] + 1] - gp_map[num_bzgp + ir_q1_gps[i]] + 1;
bz1++)
{
if (bz1 == 0)
{
bzgp[1] = ir_q1_gps[i];
}
else
{
bzgp[1] = num_gp + gp_map[num_bzgp + ir_q1_gps[i]] + bz1 - 1;
}
for (bz2 = 0;
bz2 < gp_map[num_bzgp + gp2 + 1] - gp_map[num_bzgp + gp2] + 1;
bz2++)
{
if (bz2 == 0)
{
bzgp[2] = gp2;
}
else
{
bzgp[2] = num_gp + gp_map[num_bzgp + gp2] + bz2 - 1;
}
for (j = 0; j < 3; j++)
{
G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] + bz_adrs[bzgp[2]][j];
}
if (G[0] == 0 && G[1] == 0 && G[2] == 0)
{
for (j = 0; j < 3; j++)
{
triplets[i][j] = bzgp[j];
}
goto found;
}
d2 = get_squared_distance(G, LQD_inv);
if (d2 < min_d2 - tolerance || min_d2 < 0)
{
min_d2 = d2;
for (j = 0; j < 3; j++)
{
triplets[i][j] = bzgp[j];
}
}
}
}
}
found:;
}
}
static void get_BZ_triplets_at_q_type2(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir)
{
long i, j, gp0, gp2;
long bzgp[3], G[3];
long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3];
const long *gp_map;
const long(*bz_adrs)[3];
double d2, min_d2, tolerance;
double LQD_inv[3][3];
gp_map = bzgrid->gp_map;
bz_adrs = bzgrid->addresses;
get_LQD_inv(LQD_inv, bzgrid);
/* This tolerance is used for consistency with the BZ reduction in bzgrid. */
tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid *)bzgrid);
for (i = 0; i < 3; i++)
{
bz_adrs0[i] = bz_adrs[grid_point][i];
}
gp0 = grg_get_grid_index(bz_adrs0, bzgrid->D_diag);
#ifdef PHPYOPENMP
#pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, min_d2)
#endif
for (i = 0; i < num_ir; i++)
{
for (j = 0; j < 3; j++)
{
bz_adrs1[j] = bz_adrs[gp_map[ir_q1_gps[i]]][j];
bz_adrs2[j] = -bz_adrs0[j] - bz_adrs1[j];
}
gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag);
/* A negative value signals that min_d2 has not been initialized yet. */
min_d2 = -1;
for (bzgp[0] = gp_map[gp0]; bzgp[0] < gp_map[gp0 + 1]; bzgp[0]++)
{
for (bzgp[1] = gp_map[ir_q1_gps[i]];
bzgp[1] < gp_map[ir_q1_gps[i] + 1]; bzgp[1]++)
{
for (bzgp[2] = gp_map[gp2]; bzgp[2] < gp_map[gp2 + 1]; bzgp[2]++)
{
for (j = 0; j < 3; j++)
{
G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] + bz_adrs[bzgp[2]][j];
}
if (G[0] == 0 && G[1] == 0 && G[2] == 0)
{
for (j = 0; j < 3; j++)
{
triplets[i][j] = bzgp[j];
}
goto found;
}
d2 = get_squared_distance(G, LQD_inv);
if (d2 < min_d2 - tolerance || min_d2 < 0)
{
min_d2 = d2;
for (j = 0; j < 3; j++)
{
triplets[i][j] = bzgp[j];
}
}
}
}
}
found:;
}
}
static double get_squared_distance(const long G[3],
const double LQD_inv[3][3])
{
double d, d2;
long i;
d2 = 0;
for (i = 0; i < 3; i++)
{
d = LQD_inv[i][0] * G[0] + LQD_inv[i][1] * G[1] + LQD_inv[i][2] * G[2];
d2 += d * d;
}
return d2;
}
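/* Worked example (illustrative, not from the original source): with */
/* LQD_inv equal to the identity and G = (1, -1, 0), the loop above gives */
/* d2 = 1*1 + (-1)*(-1) + 0*0 = 2, the squared Cartesian length of the */
/* reciprocal-lattice translation G. */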
static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid)
{
long i, j, k;
/* LQD^-1 = reclat . Q . D^-1; the matrix product must accumulate over j */
/* (a plain assignment would keep only the j = 2 term). */
for (i = 0; i < 3; i++)
{
for (k = 0; k < 3; k++)
{
LQD_inv[i][k] = 0;
for (j = 0; j < 3; j++)
{
LQD_inv[i][k] += bzgrid->reclat[i][j] * bzgrid->Q[j][k] / bzgrid->D_diag[k];
}
}
}
}
/* Return NULL on failure. */
static RotMats *get_reciprocal_point_group_with_q(const RotMats *rot_reciprocal,
const long D_diag[3],
const long grid_point)
{
long i, num_rot, gp_rot;
long *ir_rot;
long adrs[3], adrs_rot[3];
RotMats *rot_reciprocal_q;
ir_rot = NULL;
rot_reciprocal_q = NULL;
num_rot = 0;
grg_get_grid_address_from_index(adrs, grid_point, D_diag);
if ((ir_rot = (long *)malloc(sizeof(long) * rot_reciprocal->size)) == NULL)
{
warning_print("Memory of ir_rot could not be allocated.");
return NULL;
}
for (i = 0; i < rot_reciprocal->size; i++)
{
ir_rot[i] = -1;
}
for (i = 0; i < rot_reciprocal->size; i++)
{
lagmat_multiply_matrix_vector_l3(adrs_rot, rot_reciprocal->mat[i], adrs);
gp_rot = grg_get_grid_index(adrs_rot, D_diag);
if (gp_rot == grid_point)
{
ir_rot[num_rot] = i;
num_rot++;
}
}
if ((rot_reciprocal_q = bzg_alloc_RotMats(num_rot)) != NULL)
{
for (i = 0; i < num_rot; i++)
{
lagmat_copy_matrix_l3(rot_reciprocal_q->mat[i],
rot_reciprocal->mat[ir_rot[i]]);
}
}
free(ir_rot);
ir_rot = NULL;
return rot_reciprocal_q;
}
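/* Note (ours): the set returned above is the stabilizer (little group) of */
/* grid_point, i.e. exactly those reciprocal-space rotations R with */
/* R . q == q on the grid, as tested by gp_rot == grid_point. */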
static RotMats *get_reciprocal_point_group(const long (*rec_rotations_in)[3][3],
const long num_rot,
const long is_time_reversal,
const long is_transpose)
{
long i, num_rot_out;
long rec_rotations_out[48][3][3];
RotMats *rec_rotations;
num_rot_out = grg_get_reciprocal_point_group(rec_rotations_out,
rec_rotations_in,
num_rot,
is_time_reversal,
is_transpose);
if (num_rot_out == 0)
{
return NULL;
}
rec_rotations = bzg_alloc_RotMats(num_rot_out);
if (rec_rotations == NULL)
{
return NULL;
}
for (i = 0; i < num_rot_out; i++)
{
lagmat_copy_matrix_l3(rec_rotations->mat[i], rec_rotations_out[i]);
}
return rec_rotations;
}
|
pre_utilities.h | #ifndef PRE_UTILITIES_H
#define PRE_UTILITIES_H
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <string>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "utilities/timer.h"
#include "includes/variables.h"
#include "utilities/openmp_utils.h"
#include "cluster_information.h"
#include "custom_elements/spheric_continuum_particle.h"
namespace Kratos
{
class PreUtilities
{
public:
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
typedef GlobalPointersVector<Element> ParticleWeakVectorType;
typedef GlobalPointersVector<Element>::iterator ParticleWeakIteratorType;
KRATOS_CLASS_POINTER_DEFINITION(PreUtilities);
/// Default constructor
PreUtilities() {}
PreUtilities(ModelPart& rModelPart)
{
//mInitialCenterOfMassAndMass = CalculateCenterOfMass(rModelPart);
//mInitialMass = CalculateTotalMass(rModelPart);
}
/// Destructor
virtual ~PreUtilities() {}
void SetClusterInformationInProperties(std::string const& name,
pybind11::list& list_of_coordinates,
pybind11::list& list_of_radii,
double size,
double volume,
pybind11::list& inertias,
Properties::Pointer& p_properties) {
ClusterInformation cl_info;
cl_info.mName = name;
array_1d<double,3> coords(3,0.0);
for (int i = 0; i < (int)pybind11::len(list_of_coordinates); i++) {
pybind11::list list(list_of_coordinates[i]);
coords[0] = pybind11::cast<double>(list[0]);
coords[1] = pybind11::cast<double>(list[1]);
coords[2] = pybind11::cast<double>(list[2]);
cl_info.mListOfCoordinates.push_back(coords);
}
for (int i = 0; i < (int)pybind11::len(list_of_radii); i++) {
cl_info.mListOfRadii.push_back(pybind11::cast<double>(list_of_radii[i]));
}
//TODO: check the sizes (should be the same)
cl_info.mSize = size;
cl_info.mVolume = volume;
cl_info.mInertias[0] = pybind11::cast<double>(inertias[0]);
cl_info.mInertias[1] = pybind11::cast<double>(inertias[1]);
cl_info.mInertias[2] = pybind11::cast<double>(inertias[2]);
p_properties->SetValue(CLUSTER_INFORMATION, cl_info);
}
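// Note on the method above (illustrative): it is meant to be called from
// Python through pybind11, with list_of_coordinates a list of [x, y, z]
// triplets and list_of_radii a list of doubles of the same length (the
// TODO about matching sizes refers to this).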
void FillAnalyticSubModelPartUtility(ModelPart& rSpheresModelPart, ModelPart& rAnalyticSpheresModelPart){
ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements();
std::vector<std::vector<std::size_t> > thread_vectors_of_ids;
const int number_of_threads = OpenMPUtils::GetNumThreads();
thread_vectors_of_ids.resize(number_of_threads);
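// push_back on a shared vector is not thread-safe, so each thread fills
// its own vector; the per-thread results are merged serially below.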
#pragma omp parallel for
for (int k = 0; k < (int)pElements.size(); k++) {
ElementsArrayType::iterator it = pElements.ptr_begin() + k;
int analytic_particle_id = it->Id();
thread_vectors_of_ids[OpenMPUtils::ThisThread()].push_back(analytic_particle_id);
}
std::vector<std::size_t> vector_of_ids;
for (int i = 0; i < number_of_threads; i++) {
vector_of_ids.insert(vector_of_ids.end(), thread_vectors_of_ids[i].begin(), thread_vectors_of_ids[i].end());
}
rAnalyticSpheresModelPart.AddElements(vector_of_ids);
}
// non-OMP version
// void FillAnalyticSubModelPartUtility(ModelPart& rSpheresModelPart, ModelPart& rAnalyticSpheresModelPart){
// ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements();
// std::vector<long unsigned int> vector_of_ids;
// for (int k = 0; k < (int)pElements.size(); k++) {
// ElementsArrayType::iterator it = pElements.ptr_begin() + k;
// int analytic_particle_id = it->Id();
// vector_of_ids.push_back(analytic_particle_id);
// }
// rAnalyticSpheresModelPart.AddElements(vector_of_ids);
// }
void BreakBondUtility(ModelPart& rSpheresModelPart){
ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements();
#pragma omp parallel for
for (int k = 0; k < (int)pElements.size(); k++) {
ElementsArrayType::iterator it = pElements.ptr_begin() + k;
Element* p_element = &(*it);
SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(p_element);
if (p_sphere == NULL || p_sphere->mNeighbourElements.size() == 0) continue; // skip failed casts and particles with no neighbours
double x_node = p_sphere->GetGeometry()[0].Coordinates()[0];
double y_node = p_sphere->GetGeometry()[0].Coordinates()[1];
double z_node = p_sphere->GetGeometry()[0].Coordinates()[2];
double radius = 0.0225; // radius of the notch in the shear test
if ((x_node*x_node + z_node*z_node >= radius*radius && y_node < 0.01) || (x_node*x_node + z_node*z_node >= radius*radius && y_node > 0.07)){ // 1- geometry condition
unsigned int number_of_neighbors = p_sphere->mContinuumInitialNeighborsSize;
for (unsigned int i = 0; i < number_of_neighbors; i++)
{
SphericContinuumParticle* neighbour_iterator = dynamic_cast<SphericContinuumParticle*>(p_sphere->mNeighbourElements[i]);
double x_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[0];
double z_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[2];
double radius_it = 0.0225; // radius of the notch in the shear test.
if (x_node_it*x_node_it + z_node_it*z_node_it < radius_it*radius_it){ // 2- geometry condition
//int& failure_type = p_sphere->mIniNeighbourFailureId[i];
//failure_type = 1;
p_sphere->Set(TO_ERASE, true);
neighbour_iterator->Set(TO_ERASE, true);
//noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates();
//noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition();
}
}
} else if ((x_node*x_node + z_node*z_node < radius*radius && y_node < 0.01) || (x_node*x_node + z_node*z_node < radius*radius && y_node > 0.07)) {
unsigned int number_of_neighbors = p_sphere->mContinuumInitialNeighborsSize;
for (unsigned int i = 0; i < number_of_neighbors; i++)
{
SphericContinuumParticle* neighbour_iterator = dynamic_cast<SphericContinuumParticle*>(p_sphere->mNeighbourElements[i]);
double x_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[0];
double z_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[2];
double radius_it = 0.0225; // radius of the notch in the shear test.
if (x_node_it*x_node_it + z_node_it*z_node_it > radius_it*radius_it){ // 2- geometry condition
//int& failure_type = p_sphere->mIniNeighbourFailureId[i];
//failure_type = 1;
p_sphere->Set(TO_ERASE, true);
neighbour_iterator->Set(TO_ERASE, true);
}
}
}
}
}
void CreateCartesianSpecimenMdpa(std::string filename) {
// We have a prismatic specimen of dimensions 0.15 m x 0.30 m x 0.15 m (square cross-section, doubled along y)
const double side = 0.15;
int divisions;
KRATOS_WARNING("DEM") << "\nEnter the number of divisions: ";
std::cin >> divisions;
if (!divisions) {
KRATOS_WARNING("DEM") << "\nCannot divide by zero. Program stopped.\n\n";
exit(EXIT_FAILURE);
}
const double radius = 0.5 * side / divisions;
int node_counter = 0;
std::vector<int> skin_nodes;
std::vector<int> top_nodes;
std::vector<int> bottom_nodes;
filename += "DEM.mdpa";
//
std::ifstream infile(filename);
if(infile.good()) {
while(1){
KRATOS_WARNING("DEM") << "\nThe file already exists. Do you want to overwrite it? (y/n) ";
char yn;
std::cin >> yn;
if(yn == 'n') {
KRATOS_WARNING("DEM") << "\nStopped.\n\n";
exit(EXIT_FAILURE);
}
if(yn=='y') break;
}
}
KRATOS_INFO("DEM") << "\nGenerating mesh...\n\n";
clock_t initial_time, end_time;
initial_time = clock();
std::ofstream outputfile(filename, std::ios_base::out);
outputfile << "Begin ModelPartData\nEnd ModelPartData\n\n";
outputfile << "Begin Properties 1\n";
outputfile << "PARTICLE_DENSITY 2550.0\n";
outputfile << "YOUNG_MODULUS 35e9\n";
outputfile << "POISSON_RATIO 0.20\n";
outputfile << "FRICTION 0.5773502691896257\n";
outputfile << "PARTICLE_COHESION 0.0\n";
outputfile << "COEFFICIENT_OF_RESTITUTION 0.2\n";
outputfile << "PARTICLE_MATERIAL 1\n";
outputfile << "ROLLING_FRICTION 0.01\n";
outputfile << "ROLLING_FRICTION_WITH_WALLS 0.01\n";
outputfile << "DEM_CONTINUUM_CONSTITUTIVE_LAW_NAME DEM_Dempack\n";
outputfile << "DEM_DISCONTINUUM_CONSTITUTIVE_LAW_NAME DEM_D_Linear_viscous_Coulomb\n";
outputfile << "SLOPE_LIMIT_COEFF_C1 24\n";
outputfile << "SLOPE_LIMIT_COEFF_C2 28\n";
outputfile << "SLOPE_LIMIT_COEFF_C3 1\n";
outputfile << "SLOPE_FRACTION_N1 1\n";
outputfile << "SLOPE_FRACTION_N2 1\n";
outputfile << "SLOPE_FRACTION_N3 35e9\n";
outputfile << "YOUNG_MODULUS_PLASTIC 1000\n";
outputfile << "PLASTIC_YIELD_STRESS 0.2\n";
outputfile << "DAMAGE_FACTOR 1\n";
outputfile << "SHEAR_ENERGY_COEF 1\n";
outputfile << "CONTACT_TAU_ZERO 5\n";
outputfile << "CONTACT_SIGMA_MIN 1\n";
outputfile << "CONTACT_INTERNAL_FRICC 20\n";
outputfile << "End Properties\n";
outputfile << "\nBegin Nodes\n";
// Relative sizes according to axes:
int ai=1;
int aj=2;
int ak=1;
// Generation of the sample
for (int k = 0; k < ai*divisions; k++) {
for (int j = 0; j < aj* divisions; j++) {
for (int i = 0; i < ak*divisions; i++) {
outputfile << ++node_counter << " " << (1 + 2 * i) * radius - 0.5*side << " " << (1 + 2 * j) * radius << " " << (1 + 2 * k) * radius - 0.5*side << '\n';
if ((i == 0) || (j == 0) || (k == 0) || (i == ai* divisions - 1) || (j == aj*divisions - 1) || (k == ak*divisions - 1)) skin_nodes.push_back(node_counter);
if (j == 0) bottom_nodes.push_back(node_counter); // j runs along the elongated (vertical) axis
if (j == aj * divisions - 1) top_nodes.push_back(node_counter);
}
}
}
//
outputfile << "End Nodes\n";
outputfile << "\nBegin Elements SphericContinuumParticle3D\n";
for (int i = 1; i <= node_counter; i++) outputfile << i << " 1 " << i << '\n';
outputfile << "End Elements\n";
outputfile << "\nBegin NodalData RADIUS\n";
for (int i = 1; i <= node_counter; i++) outputfile << i << " 0 " << radius << '\n';
outputfile << "End NodalData\n";
outputfile << "\nBegin NodalData COHESIVE_GROUP // whole specimen\n";
for (int i = 1; i <= node_counter; i++) outputfile << i << " 0 1\n";
outputfile << "End NodalData\n";
//outputfile << "\nBegin NodalData COHESIVE_GROUP // bottom nodes\n";
//for (std::vector<int>::iterator it_bottom = bottom_nodes.begin(); it_bottom != bottom_nodes.end(); it_bottom++) outputfile << *it_bottom << " 0 1\n";
//outputfile << "End NodalData\n\nBegin NodalData COHESIVE_GROUP // top nodes\n";
//for (std::vector<int>::iterator it_top = top_nodes.begin(); it_top != top_nodes.end(); it_top++) outputfile << *it_top << " 0 1\n";
//outputfile << "End NodalData\n";
outputfile << "\nBegin NodalData SKIN_SPHERE\n";
for (std::vector<int>::iterator it_skin = skin_nodes.begin(); it_skin != skin_nodes.end(); it_skin++) outputfile << *it_skin << " 0 1\n";
outputfile << "End NodalData\n\n";
/*outputfile << "Begin Mesh 1 // bottom nodes\n Begin MeshData\n VELOCITY_START_TIME 0.0\n";
outputfile << " FORCE_INTEGRATION_GROUP 0\n VELOCITY_STOP_TIME 100.0\n TOP 0\n";
outputfile << " IMPOSED_VELOCITY_Z_VALUE 0.0005\n BOTTOM 0\n End MeshData\n Begin MeshNodes\n";
for (std::vector<int>::iterator it_bottom = bottom_nodes.begin(); it_bottom != bottom_nodes.end(); it_bottom++) outputfile << " " << *it_bottom << '\n';
outputfile << " End MeshNodes\nEnd Mesh\n\n";
outputfile << "Begin Mesh 2 // top nodes\n Begin MeshData\n VELOCITY_START_TIME 0.0\n";
outputfile << " FORCE_INTEGRATION_GROUP 0\n VELOCITY_STOP_TIME 100.0\n TOP 0\n";
outputfile << " IMPOSED_VELOCITY_Z_VALUE -0.0005\n BOTTOM 0\n End MeshData\n Begin MeshNodes\n";
for (std::vector<int>::iterator it_top = top_nodes.begin(); it_top != top_nodes.end(); it_top++) outputfile << " " << *it_top << '\n';
outputfile << " End MeshNodes\nEnd Mesh\n";*/
outputfile.close();
end_time = clock();
double elapsed_time = (double(end_time) - double(initial_time)) / CLOCKS_PER_SEC;
KRATOS_INFO("DEM") << "\nfinished!\n\n";
KRATOS_INFO("DEM") << "\nTotal number of elements: " << node_counter << '\n';
KRATOS_INFO("DEM") << "\nTime required to create the mdpa file: " << elapsed_time << " seconds\n\n";
}
void MeasureTopHeight(ModelPart& rModelPart, double& subtotal, double& weight)
{
/*
ElementsArrayType& pElements = rModelPart.Elements();
for (ElementsArrayType::iterator it= pElements.begin(); it!=pElements.end(); ++it)
{
if( it->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) == 1 )
{
ParticleWeakVectorType& mrNeighbours = it->GetValue(NEIGHBOUR_ELEMENTS);
for(ParticleWeakIteratorType ineighbour = mrNeighbours.begin();
ineighbour != mrNeighbours.end(); ineighbour++)
{
if( ineighbour->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) != 1 )
{
subtotal += it->GetGeometry()[0].Coordinates()[1]*it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS);
weight += it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS);
break;
}
}
}
}
*/
}
void MeasureBotHeight(ModelPart& rModelPart, double& subtotal, double& weight)
{
/*
ElementsArrayType& pElements = rModelPart.Elements();
for (ElementsArrayType::iterator it= pElements.begin(); it!=pElements.end(); ++it)
{
if( it->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) == 2 )
{
ParticleWeakVectorType& mrNeighbours = it->GetValue(NEIGHBOUR_ELEMENTS);
for(ParticleWeakIteratorType ineighbour = mrNeighbours.begin();
ineighbour != mrNeighbours.end(); ineighbour++)
{
if( ineighbour->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) != 2 )
{
subtotal += it->GetGeometry()[0].Coordinates()[1]*it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS);
weight += it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS);
break;
}
}
}
}
*/
}
void MarkToEraseParticlesOutsideRadius(ModelPart& r_model_part, const double max_radius, const array_1d<double, 3>& center, const double tolerance_for_erasing) {
auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int)pNodes.size(); k++) {
auto it = pNodes.begin() + k;
const array_1d<double, 3>& coords = it->Coordinates();
array_1d<double, 3> vector_distance_to_center;
noalias(vector_distance_to_center) = coords - center;
const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center);
const double radius = it->FastGetSolutionStepValue(RADIUS);
if(distance_to_center + radius > max_radius + tolerance_for_erasing) {
it->Set(TO_ERASE, true);
}
}
}
void ApplyConcentricForceOnParticles(ModelPart& r_model_part, const array_1d<double, 3>& center, const double density_for_artificial_gravity) {
auto& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();
#pragma omp parallel for
for (int k = 0; k < (int)pElements.size(); k++) {
auto it = pElements.begin() + k;
auto& node = it->GetGeometry()[0];
const array_1d<double, 3>& coords = node.Coordinates();
array_1d<double, 3> vector_particle_to_center;
noalias(vector_particle_to_center) = center - coords;
const double distance_to_center = MathUtils<double>::Norm3(vector_particle_to_center);
const double inv_dist = 1.0 / distance_to_center;
array_1d<double, 3> force;
SphericParticle* spheric_p_particle = dynamic_cast<SphericParticle*> (&*it);
const double volume = spheric_p_particle->CalculateVolume();
noalias(force) = inv_dist * vector_particle_to_center * volume * density_for_artificial_gravity;
node.FastGetSolutionStepValue(EXTERNAL_APPLIED_FORCE) = force;
}
}
void ResetSkinParticles(ModelPart& r_model_part) {
auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int)pNodes.size(); k++) {
auto it = pNodes.begin() + k;
it->FastGetSolutionStepValue(SKIN_SPHERE) = 0.0;
}
}
void SetSkinParticlesInnerBoundary(ModelPart& r_model_part, const double inner_radius, const double detection_radius) {
auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int)pNodes.size(); k++) {
auto it = pNodes.begin() + k;
const array_1d<double, 3>& coords = it->Coordinates();
array_1d<double, 3> vector_distance_to_center;
noalias(vector_distance_to_center) = coords;
const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center);
if(distance_to_center < inner_radius + detection_radius) {
it->FastGetSolutionStepValue(SKIN_SPHERE) = 1.0;
}
}
}
void SetSkinParticlesOuterBoundary(ModelPart& r_model_part, const double outer_radius, const double detection_radius) {
auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int)pNodes.size(); k++) {
auto it = pNodes.begin() + k;
const array_1d<double, 3>& coords = it->Coordinates();
array_1d<double, 3> vector_distance_to_center;
noalias(vector_distance_to_center) = coords;
const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center);
const double radius = it->FastGetSolutionStepValue(RADIUS);
if(distance_to_center + radius > outer_radius - detection_radius) {
it->FastGetSolutionStepValue(SKIN_SPHERE) = 1.0;
}
}
}
array_1d<double, 3> GetInitialCenterOfMass()
{
return mInitialCenterOfMassAndMass;
}
/// Turn back information as a string.
virtual std::string Info() const
{
return "";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
}
std::vector<unsigned int>& GetElementPartition() {return (mElementPartition);};
protected:
std::vector<unsigned int> mElementPartition;
private:
array_1d<double, 3> mInitialCenterOfMassAndMass;
double mInitialMass;
/// Assignment operator
PreUtilities & operator=(PreUtilities const& rOther);
}; // Class PreUtilities
/// output stream function
// template<std::size_t TDim>
// inline std::ostream& operator << (std::ostream& rOStream)
// {
// rThis.PrintInfo(rOStream);
// rOStream << std::endl;
// rThis.PrintData(rOStream);
//
// return rOStream;
// }
} // namespace Kratos
#endif // PRE_UTILITIES_H
|
ex2.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
#define N 10000000
int a[N];
int main(void) {
int i, tid, numThreads;
for (i = 0; i < N; i++) {
a[i] = 1;
}
long int sum = 0;
double t1, t2;
t1 = omp_get_wtime();
#pragma omp parallel default(shared) private(i, tid, numThreads) // numThreads is written by every thread, so it must be private
{
tid = omp_get_thread_num();
numThreads = omp_get_num_threads();
int start = tid * ceil((double)N / numThreads);
int end = fmin(N , (tid + 1) * ceil((double)N / numThreads));
for (i = start; i < end; i++) {
#pragma omp critical
sum += a[i];
}
}
t2 = omp_get_wtime();
printf("Sum=%ld, duration=%g\n", sum, t2-t1);
return 0;
}
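/* Alternative sketch (ours, not part of the original exercise): the same sum
computed with an OpenMP reduction clause. It avoids both the manual index
chunking and the per-iteration critical section, which serializes the loop
above. */
long sum_with_reduction(void) {
long s = 0;
int i;
#pragma omp parallel for reduction(+ : s)
for (i = 0; i < N; i++) {
s += a[i];
}
return s;
}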
|
GB_Global.c | //------------------------------------------------------------------------------
// GB_Global: global values in GraphBLAS
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All Global storage is declared, initialized, and accessed here. The
// contents of the GB_Global struct are only accessible to functions in this
// file. Global storage is used to keep track of the GraphBLAS mode (blocking
// or non-blocking), for pointers to malloc/realloc/free functions,
// global matrix options, and other settings.
#include "GB_atomics.h"
//------------------------------------------------------------------------------
// Global storage: for all threads in a user application that uses GraphBLAS
//------------------------------------------------------------------------------
typedef struct
{
//--------------------------------------------------------------------------
// blocking/non-blocking mode, set by GrB_init
//--------------------------------------------------------------------------
GrB_Mode mode ; // GrB_NONBLOCKING or GrB_BLOCKING
bool GrB_init_called ; // true if GrB_init already called
//--------------------------------------------------------------------------
// threading control
//--------------------------------------------------------------------------
int nthreads_max ; // max number of threads to use
double chunk ; // chunk size for determining # threads to use
//--------------------------------------------------------------------------
// hypersparsity and CSR/CSC format control
//--------------------------------------------------------------------------
float bitmap_switch [GxB_NBITMAP_SWITCH] ; // default bitmap_switch
float hyper_switch ; // default hyper_switch for new matrices
bool is_csc ; // default CSR/CSC format for new matrices
//--------------------------------------------------------------------------
// abort function: only used for debugging
//--------------------------------------------------------------------------
void (* abort_function ) (void) ;
//--------------------------------------------------------------------------
// malloc/realloc/free: memory management functions
//--------------------------------------------------------------------------
// All threads must use the same malloc/realloc/free functions.
// They default to the ANSI C11 functions, but can be defined by GxB_init.
void * (* malloc_function ) (size_t) ; // required
void * (* realloc_function ) (void *, size_t) ; // may be NULL
void (* free_function ) (void *) ; // required
bool malloc_is_thread_safe ; // default is true
//--------------------------------------------------------------------------
// memory usage tracking: for testing and debugging only
//--------------------------------------------------------------------------
// malloc_tracking: default is false. There is no user-accessible API for
// setting this to true. If true, the following statistics are computed.
// If false, all of the following are unused.
// nmalloc: To aid in searching for memory leaks, GraphBLAS keeps track of
// the number of allocated blocks that have not yet been freed. The
// count starts at zero. GB_malloc_memory and GB_calloc_memory increment
// this count, and free (of a non-NULL pointer) decrements it. realloc
// increments the count if it is allocating a new block, but it does this
// by calling GB_malloc_memory.
// malloc_debug: this is used for testing only (GraphBLAS/Tcov). If true,
// then use malloc_debug_count for testing memory allocation and
// out-of-memory conditions. If malloc_debug_count > 0, the value is
// decremented after each allocation of memory. If malloc_debug_count <=
// 0, the GB_*_memory routines pretend to fail; returning NULL and not
// allocating anything.
bool malloc_tracking ; // true if allocations are being tracked
int64_t nmalloc ; // number of blocks allocated but not freed
bool malloc_debug ; // if true, test memory handling
int64_t malloc_debug_count ; // for testing memory handling
//--------------------------------------------------------------------------
// for testing and development
//--------------------------------------------------------------------------
int64_t hack [2] ; // settings for testing/development only
//--------------------------------------------------------------------------
// diagnostic output
//--------------------------------------------------------------------------
bool burble ; // controls GBURBLE output
GB_printf_function_t printf_func ; // pointer to printf
GB_flush_function_t flush_func ; // pointer to flush
bool print_one_based ; // if true, print 1-based indices
bool print_mem_shallow ; // if true, print # shallow bytes
//--------------------------------------------------------------------------
// timing: for code development only
//--------------------------------------------------------------------------
double timing [40] ;
//--------------------------------------------------------------------------
// for malloc debugging only
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
#define GB_MEMTABLE_SIZE 10000
GB_void *memtable_p [GB_MEMTABLE_SIZE] ;
size_t memtable_s [GB_MEMTABLE_SIZE] ;
#endif
int nmemtable ;
//--------------------------------------------------------------------------
// internal memory pool
//--------------------------------------------------------------------------
// free_pool [k] is a pointer to a linked list of freed blocks, all of size
// exactly equal to 2^k. The total number of blocks in the kth pool is
// given by free_pool_nblocks [k], and the upper bound on this is given by
// free_pool_limit [k]. If any additional blocks of size 2^k above that
// limit are freed by GB_dealloc_memory, they are not placed in the pool,
// but actually freed instead.
void *free_pool [64] ;
int64_t free_pool_nblocks [64] ;
int64_t free_pool_limit [64] ;
//--------------------------------------------------------------------------
// CUDA (DRAFT: in progress)
//--------------------------------------------------------------------------
int gpu_count ; // # of GPUs in the system
GrB_Desc_Value gpu_control ; // always, never, or default
double gpu_chunk ; // min problem size for using a GPU
// properties of each GPU:
GB_cuda_device gpu_properties [GB_CUDA_MAX_GPUS] ;
}
GB_Global_struct ;
GB_PUBLIC GB_Global_struct GB_Global ;
GB_Global_struct GB_Global =
{
// GraphBLAS mode
.mode = GrB_NONBLOCKING, // default is nonblocking
// initialization flag
.GrB_init_called = false, // GrB_init has not yet been called
// max number of threads and chunk size
.nthreads_max = 1,
.chunk = GB_CHUNK_DEFAULT,
// min dimension density
#define GB_BITSWITCH_1 ((float) 0.04)
#define GB_BITSWITCH_2 ((float) 0.05)
#define GB_BITSWITCH_3_to_4 ((float) 0.06)
#define GB_BITSWITCH_5_to_8 ((float) 0.08)
#define GB_BITSWITCH_9_to_16 ((float) 0.10)
#define GB_BITSWITCH_17_to_32 ((float) 0.20)
#define GB_BITSWITCH_33_to_64 ((float) 0.30)
#define GB_BITSWITCH_gt_than_64 ((float) 0.40)
// default format
.bitmap_switch = {
GB_BITSWITCH_1,
GB_BITSWITCH_2,
GB_BITSWITCH_3_to_4,
GB_BITSWITCH_5_to_8,
GB_BITSWITCH_9_to_16,
GB_BITSWITCH_17_to_32,
GB_BITSWITCH_33_to_64,
GB_BITSWITCH_gt_than_64 },
.hyper_switch = GB_HYPER_SWITCH_DEFAULT,
.is_csc = false, // default is GxB_BY_ROW
// abort function for debugging only
.abort_function = abort,
// malloc/realloc/free functions: default to ANSI C11 functions
.malloc_function = malloc,
.realloc_function = realloc,
.free_function = free,
.malloc_is_thread_safe = true,
// malloc tracking, for testing, statistics, and debugging only
.malloc_tracking = false,
.nmalloc = 0, // memory block counter
.malloc_debug = false, // do not test memory handling
.malloc_debug_count = 0, // counter for testing memory handling
// for testing and development only
.hack = {0, 0},
// diagnostics
.burble = false,
.printf_func = NULL,
.flush_func = NULL,
.print_one_based = false, // if true, print 1-based indices
.print_mem_shallow = false, // for @GrB interface only
.timing = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// for malloc debugging only
.nmemtable = 0, // memtable is empty
// all free_pool lists start out empty
.free_pool = {
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL },
.free_pool_nblocks = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// default limits on the number of free blocks in each list:
.free_pool_limit =
#ifdef _OPENMP
{ 0, // size 2^0 = 1 byte none
0, // size 2^1 = 2 none
0, // size 2^2 = 4 none
16384, // size 2^3 = 8 (2^14 blocks * 2^3 = 128 KB total)
16384, // size 2^4 = 16 bytes (2^14 blocks * 2^4 = 256 KB total)
16384, // size 2^5 = 32 (2^14 blocks * 2^5 = 512 KB total)
16384, // size 2^6 = 64 (2^14 blocks * 2^6 = 1 MB total)
16384, // size 2^7 = 128 (2^14 blocks * 2^7 = 2 MB total)
16384, // size 2^8 = 256 (2^14 blocks * 2^8 = 4 MB total)
8192, // size 2^9 = 512 (2^13 blocks * 2^9 = 4 MB total)
4096, // size 2^10 = 1 KB (2^12 blocks * 2^10 = 4 MB total)
2048, // size 2^11 = 2 KB (2^11 blocks * 2^11 = 4 MB total)
1024, // size 2^12 = 4 KB (2^10 blocks * 2^12 = 4 MB total)
512, // size 2^13 = 8 KB (2^9 blocks * 2^13 = 4 MB total)
256, // size 2^14 = 16 KB (2^8 blocks * 2^14 = 4 MB total)
128, // size 2^15 = 32 KB (2^7 blocks * 2^15 = 4 MB total)
// maximum total size = about 36 MB
// by default, no blocks larger than 32 KB are kept in the free_pool
0, // size 2^16 = 64 KB
0, // size 2^17 = 128 KB
0, // size 2^18 = 256 KB
0, // size 2^19 = 512 KB
0, // size 2^20 = 1 MB
0, // size 2^21
0, // size 2^22
0, // size 2^23
0, // size 2^24
0, // size 2^25
0, // size 2^26
0, // size 2^27
0, // size 2^28
0, // size 2^29
0, // size 2^30 (1 GB)
0, // size 2^31
0, // size 2^32
0, // size 2^33
0, // size 2^34
0, // size 2^35
0, // size 2^36
0, // size 2^37
0, // size 2^38
0, // size 2^39
// These larger sizes are of course unlikely to appear, but adding all
// 64 possibilities means that the free_pool does not need to check an
// upper bound.
0, // size 2^40 (1 TB)
0, // size 2^41
0, // size 2^42
0, // size 2^43
0, // size 2^44
0, // size 2^45
0, // size 2^46
0, // size 2^47
0, // size 2^48
0, // size 2^49
0, // size 2^50 (1 PB)
0, // size 2^51
0, // size 2^52
0, // size 2^53
0, // size 2^54
0, // size 2^55
0, // size 2^56
0, // size 2^57
0, // size 2^58
0, // size 2^59
0, // size 2^60 (1 exabyte)
0, // size 2^61
0, // size 2^62
0 }, // size 2^63 (4 exabytes!)
#else
// the free pool requires an OpenMP critical section,
// so disable it if OpenMP is not available.
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
#endif
// CUDA environment (DRAFT: in progress)
.gpu_count = 0, // # of GPUs in the system
.gpu_control = GxB_DEFAULT, // always, never, or default
.gpu_chunk = GB_GPU_CHUNK_DEFAULT, // min problem size for using a GPU
} ;
//==============================================================================
// GB_Global access functions
//==============================================================================
//------------------------------------------------------------------------------
// mode
//------------------------------------------------------------------------------
void GB_Global_mode_set (GrB_Mode mode)
{
GB_Global.mode = mode ;
}
GrB_Mode GB_Global_mode_get (void)
{
return (GB_Global.mode) ;
}
//------------------------------------------------------------------------------
// GrB_init_called
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_GrB_init_called_set (bool GrB_init_called)
{
GB_Global.GrB_init_called = GrB_init_called ;
}
GB_PUBLIC
bool GB_Global_GrB_init_called_get (void)
{
return (GB_Global.GrB_init_called) ;
}
//------------------------------------------------------------------------------
// nthreads_max
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_nthreads_max_set (int nthreads_max)
{
GB_Global.nthreads_max = GB_IMAX (nthreads_max, 1) ;
}
GB_PUBLIC
int GB_Global_nthreads_max_get (void)
{
return (GB_Global.nthreads_max) ;
}
//------------------------------------------------------------------------------
// OpenMP max_threads
//------------------------------------------------------------------------------
GB_PUBLIC
int GB_Global_omp_get_max_threads (void)
{
return (GB_OPENMP_MAX_THREADS) ;
}
//------------------------------------------------------------------------------
// chunk
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_chunk_set (double chunk)
{
if (chunk <= GxB_DEFAULT) chunk = GB_CHUNK_DEFAULT ;
GB_Global.chunk = fmax (chunk, 1) ;
}
GB_PUBLIC
double GB_Global_chunk_get (void)
{
return (GB_Global.chunk) ;
}
//------------------------------------------------------------------------------
// hyper_switch
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_hyper_switch_set (float hyper_switch)
{
GB_Global.hyper_switch = hyper_switch ;
}
GB_PUBLIC
float GB_Global_hyper_switch_get (void)
{
return (GB_Global.hyper_switch) ;
}
//------------------------------------------------------------------------------
// bitmap_switch
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_bitmap_switch_set (int k, float b)
{
k = GB_IMAX (k, 0) ;
k = GB_IMIN (k, 7) ;
GB_Global.bitmap_switch [k] = b ;
}
GB_PUBLIC
float GB_Global_bitmap_switch_get (int k)
{
k = GB_IMAX (k, 0) ;
k = GB_IMIN (k, 7) ;
return (GB_Global.bitmap_switch [k]) ;
}
GB_PUBLIC
float GB_Global_bitmap_switch_matrix_get (int64_t vlen, int64_t vdim)
{
int64_t d = GB_IMIN (vlen, vdim) ;
if (d <= 1) return (GB_Global.bitmap_switch [0]) ;
if (d <= 2) return (GB_Global.bitmap_switch [1]) ;
if (d <= 4) return (GB_Global.bitmap_switch [2]) ;
if (d <= 8) return (GB_Global.bitmap_switch [3]) ;
if (d <= 16) return (GB_Global.bitmap_switch [4]) ;
if (d <= 32) return (GB_Global.bitmap_switch [5]) ;
if (d <= 64) return (GB_Global.bitmap_switch [6]) ;
return (GB_Global.bitmap_switch [7]) ;
}
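// Worked example (ours): a 16-by-1000 matrix has min dimension d = 16, so the
// rule above selects bitmap_switch [4] (default 0.10): the matrix is switched
// to bitmap form once more than about 10% of its entries are present.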
GB_PUBLIC
void GB_Global_bitmap_switch_default (void)
{
GB_Global.bitmap_switch [0] = GB_BITSWITCH_1 ;
GB_Global.bitmap_switch [1] = GB_BITSWITCH_2 ;
GB_Global.bitmap_switch [2] = GB_BITSWITCH_3_to_4 ;
GB_Global.bitmap_switch [3] = GB_BITSWITCH_5_to_8 ;
GB_Global.bitmap_switch [4] = GB_BITSWITCH_9_to_16 ;
GB_Global.bitmap_switch [5] = GB_BITSWITCH_17_to_32 ;
GB_Global.bitmap_switch [6] = GB_BITSWITCH_33_to_64 ;
GB_Global.bitmap_switch [7] = GB_BITSWITCH_gt_than_64 ;
}
//------------------------------------------------------------------------------
// is_csc
//------------------------------------------------------------------------------
void GB_Global_is_csc_set (bool is_csc)
{
GB_Global.is_csc = is_csc ;
}
bool GB_Global_is_csc_get (void)
{
return (GB_Global.is_csc) ;
}
//------------------------------------------------------------------------------
// abort_function
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_abort_function_set (void (* abort_function) (void))
{
GB_Global.abort_function = abort_function ;
}
GB_PUBLIC
void GB_Global_abort_function (void)
{
GB_Global.abort_function ( ) ;
}
//------------------------------------------------------------------------------
// malloc debugging
//------------------------------------------------------------------------------
// These functions keep a separate record of the pointers to all allocated
// blocks of memory and their sizes, just for sanity checks.
GB_PUBLIC
void GB_Global_memtable_dump (void)
{
#ifdef GB_DEBUG
printf ("\nmemtable dump: %d nmalloc " GBd "\n", GB_Global.nmemtable,
GB_Global.nmalloc) ;
for (int k = 0 ; k < GB_Global.nmemtable ; k++)
{
printf (" %4d: %12p : %ld\n", k,
GB_Global.memtable_p [k],
GB_Global.memtable_s [k]) ;
}
#endif
}
GB_PUBLIC
int GB_Global_memtable_n (void)
{
return (GB_Global.nmemtable) ;
}
GB_PUBLIC
void GB_Global_memtable_clear (void)
{
GB_Global.nmemtable = 0 ;
}
// add a pointer to the table of malloc'd blocks
GB_PUBLIC
void GB_Global_memtable_add (void *p, size_t size)
{
if (p == NULL) return ;
if (GB_Global.malloc_tracking)
{
GB_ATOMIC_UPDATE
GB_Global.nmalloc++ ;
}
#ifdef GB_DEBUG
bool fail = false ;
#ifdef GB_MEMDUMP
printf ("memtable add %p size %ld\n", p, size) ;
#endif
#pragma omp critical(GB_memtable)
{
int n = GB_Global.nmemtable ;
fail = (n >= GB_MEMTABLE_SIZE) ; // n == GB_MEMTABLE_SIZE would overflow the table
if (!fail)
{
for (int i = 0 ; i < n ; i++)
{
if (p == GB_Global.memtable_p [i])
{
printf ("\nadd duplicate %p size %ld\n", p, size) ;
GB_Global_memtable_dump ( ) ;
printf ("Hey %d %p\n", i,p) ;
fail = true ;
break ;
}
}
}
if (!fail && p != NULL)
{
GB_Global.memtable_p [n] = p ;
GB_Global.memtable_s [n] = size ;
GB_Global.nmemtable++ ;
}
}
ASSERT (!fail) ;
#ifdef GB_MEMDUMP
GB_Global_memtable_dump ( ) ;
#endif
#endif
}
// get the size of a malloc'd block
GB_PUBLIC
size_t GB_Global_memtable_size (void *p)
{
size_t size = 0 ;
#ifdef GB_DEBUG
if (p == NULL) return (0) ;
bool found = false ;
#pragma omp critical(GB_memtable)
{
int n = GB_Global.nmemtable ;
for (int i = 0 ; i < n ; i++)
{
if (p == GB_Global.memtable_p [i])
{
size = GB_Global.memtable_s [i] ;
found = true ;
break ;
}
}
}
if (!found)
{
printf ("\nFAIL: %p not found\n", p) ;
GB_Global_memtable_dump ( ) ;
ASSERT (0) ;
}
#endif
return (size) ;
}
// test if a malloc'd block is in the table
GB_PUBLIC
bool GB_Global_memtable_find (void *p)
{
bool found = false ;
#ifdef GB_DEBUG
if (p == NULL) return (false) ;
#pragma omp critical(GB_memtable)
{
int n = GB_Global.nmemtable ;
for (int i = 0 ; i < n ; i++)
{
if (p == GB_Global.memtable_p [i])
{
found = true ;
break ;
}
}
}
#endif
return (found) ;
}
// remove a pointer from the table of malloc'd blocks
GB_PUBLIC
void GB_Global_memtable_remove (void *p)
{
if (p == NULL) return ;
if (GB_Global.malloc_tracking)
{
GB_ATOMIC_UPDATE
GB_Global.nmalloc-- ;
}
#ifdef GB_DEBUG
bool found = false ;
#ifdef GB_MEMDUMP
printf ("memtable remove %p ", p) ;
#endif
#pragma omp critical(GB_memtable)
{
int n = GB_Global.nmemtable ;
for (int i = 0 ; i < n ; i++)
{
if (p == GB_Global.memtable_p [i])
{
// found p in the table; remove it
GB_Global.memtable_p [i] = GB_Global.memtable_p [n-1] ;
GB_Global.memtable_s [i] = GB_Global.memtable_s [n-1] ;
GB_Global.nmemtable -- ;
found = true ;
break ;
}
}
}
if (!found)
{
printf ("remove %p NOT FOUND\n", p) ;
GB_Global_memtable_dump ( ) ;
}
ASSERT (found) ;
#ifdef GB_MEMDUMP
GB_Global_memtable_dump ( ) ;
#endif
#endif
}
//------------------------------------------------------------------------------
// malloc_function
//------------------------------------------------------------------------------
void GB_Global_malloc_function_set (void * (* malloc_function) (size_t))
{
GB_Global.malloc_function = malloc_function ;
}
void * GB_Global_malloc_function (size_t size)
{
void *p = NULL ;
if (GB_Global.malloc_is_thread_safe)
{
p = GB_Global.malloc_function (size) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
p = GB_Global.malloc_function (size) ;
}
}
GB_Global_memtable_add (p, size) ;
return (p) ;
}
//------------------------------------------------------------------------------
// realloc_function
//------------------------------------------------------------------------------
void GB_Global_realloc_function_set
(
void * (* realloc_function) (void *, size_t)
)
{
GB_Global.realloc_function = realloc_function ;
}
bool GB_Global_have_realloc_function (void)
{
return (GB_Global.realloc_function != NULL) ;
}
void * GB_Global_realloc_function (void *p, size_t size)
{
void *pnew = NULL ;
if (GB_Global.malloc_is_thread_safe)
{
pnew = GB_Global.realloc_function (p, size) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
pnew = GB_Global.realloc_function (p, size) ;
}
}
if (pnew != NULL)
{
GB_Global_memtable_remove (p) ;
GB_Global_memtable_add (pnew, size) ;
}
return (pnew) ;
}
//------------------------------------------------------------------------------
// free_function
//------------------------------------------------------------------------------
void GB_Global_free_function_set (void (* free_function) (void *))
{
GB_Global.free_function = free_function ;
}
void GB_Global_free_function (void *p)
{
if (GB_Global.malloc_is_thread_safe)
{
GB_Global.free_function (p) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
GB_Global.free_function (p) ;
}
}
GB_Global_memtable_remove (p) ;
}
//------------------------------------------------------------------------------
// malloc_is_thread_safe
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_malloc_is_thread_safe_set (bool malloc_is_thread_safe)
{
GB_Global.malloc_is_thread_safe = malloc_is_thread_safe ;
}
GB_PUBLIC
bool GB_Global_malloc_is_thread_safe_get (void)
{
return (GB_Global.malloc_is_thread_safe) ;
}
//------------------------------------------------------------------------------
// malloc_tracking
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_malloc_tracking_set (bool malloc_tracking)
{
GB_Global.malloc_tracking = malloc_tracking ;
}
bool GB_Global_malloc_tracking_get (void)
{
return (GB_Global.malloc_tracking) ;
}
//------------------------------------------------------------------------------
// nmalloc
//------------------------------------------------------------------------------
void GB_Global_nmalloc_clear (void)
{
GB_ATOMIC_WRITE
GB_Global.nmalloc = 0 ;
}
GB_PUBLIC
int64_t GB_Global_nmalloc_get (void)
{
int64_t nmalloc ;
GB_ATOMIC_READ
nmalloc = GB_Global.nmalloc ;
return (nmalloc) ;
}
//------------------------------------------------------------------------------
// malloc_debug
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_malloc_debug_set (bool malloc_debug)
{
GB_ATOMIC_WRITE
GB_Global.malloc_debug = malloc_debug ;
}
bool GB_Global_malloc_debug_get (void)
{
bool malloc_debug ;
GB_ATOMIC_READ
malloc_debug = GB_Global.malloc_debug ;
return (malloc_debug) ;
}
//------------------------------------------------------------------------------
// malloc_debug_count
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_malloc_debug_count_set (int64_t malloc_debug_count)
{
GB_ATOMIC_WRITE
GB_Global.malloc_debug_count = malloc_debug_count ;
}
bool GB_Global_malloc_debug_count_decrement (void)
{
GB_ATOMIC_UPDATE
GB_Global.malloc_debug_count-- ;
int64_t malloc_debug_count ;
GB_ATOMIC_READ
malloc_debug_count = GB_Global.malloc_debug_count ;
return (malloc_debug_count <= 0) ;
}
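// Usage note (ours): for Tcov-style out-of-memory testing, calling
// GB_Global_malloc_debug_set (true) and GB_Global_malloc_debug_count_set (3)
// lets the next three allocations succeed; once the count reaches zero the
// GB_*_memory routines pretend to fail, as described at the top of this file.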
//------------------------------------------------------------------------------
// hack: for setting an internal flag for testing and development only
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_hack_set (int k, int64_t hack)
{
GB_Global.hack [k] = hack ;
}
GB_PUBLIC
int64_t GB_Global_hack_get (int k)
{
return (GB_Global.hack [k]) ;
}
//------------------------------------------------------------------------------
// burble: for controlling the burble output
//------------------------------------------------------------------------------
void GB_Global_burble_set (bool burble)
{
GB_Global.burble = burble ;
}
GB_PUBLIC
bool GB_Global_burble_get (void)
{
return (GB_Global.burble) ;
}
GB_PUBLIC
GB_printf_function_t GB_Global_printf_get ( )
{
return (GB_Global.printf_func) ;
}
GB_PUBLIC
GB_flush_function_t GB_Global_flush_get ( )
{
return (GB_Global.flush_func) ;
}
GB_PUBLIC
void GB_Global_printf_set (GB_printf_function_t pr_func)
{
GB_Global.printf_func = pr_func ;
}
GB_PUBLIC
void GB_Global_flush_set (GB_flush_function_t fl_func)
{
GB_Global.flush_func = fl_func ;
}
//------------------------------------------------------------------------------
// for printing matrices in 1-based index notation (@GrB and Julia)
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_print_one_based_set (bool onebased)
{
GB_Global.print_one_based = onebased ;
}
GB_PUBLIC
bool GB_Global_print_one_based_get (void)
{
return (GB_Global.print_one_based) ;
}
//------------------------------------------------------------------------------
// for printing matrix in @GrB interface
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_print_mem_shallow_set (bool mem_shallow)
{
GB_Global.print_mem_shallow = mem_shallow ;
}
GB_PUBLIC
bool GB_Global_print_mem_shallow_get (void)
{
return (GB_Global.print_mem_shallow) ;
}
//------------------------------------------------------------------------------
// CUDA (DRAFT: in progress)
//------------------------------------------------------------------------------
void GB_Global_gpu_control_set (GrB_Desc_Value gpu_control)
{
// set the GPU control to always, never, or default
if (GB_Global.gpu_count > 0)
{
// one or more GPUs are available: set gpu_control to
// always, never, or default.
if (gpu_control == GxB_GPU_ALWAYS || gpu_control == GxB_GPU_NEVER)
{
GB_Global.gpu_control = gpu_control ;
}
else
{
GB_Global.gpu_control = GxB_DEFAULT ;
}
}
else
{
// no GPUs available: never use a GPU
GB_Global.gpu_control = GxB_GPU_NEVER ;
}
}
GrB_Desc_Value GB_Global_gpu_control_get (void)
{
// get the GPU control parameter
return (GB_Global.gpu_control) ;
}
void GB_Global_gpu_chunk_set (double gpu_chunk)
{
// set the GPU chunk factor
if (gpu_chunk < 1) gpu_chunk = GB_GPU_CHUNK_DEFAULT ;
GB_Global.gpu_chunk = gpu_chunk ;
}
double GB_Global_gpu_chunk_get (void)
{
// get the GPU chunk factor
return (GB_Global.gpu_chunk) ;
}
bool GB_Global_gpu_count_set (bool enable_cuda)
{
// set the # of GPUs in the system;
// this function is only called once, by GB_init.
#if defined ( GBCUDA )
if (enable_cuda)
{
return (GB_cuda_get_device_count (&GB_Global.gpu_count)) ;
}
else
#endif
{
// no GPUs available, or available but not requested
GB_Global.gpu_count = 0 ;
return (true) ;
}
}
int GB_Global_gpu_count_get (void)
{
// get the # of GPUs in the system
return (GB_Global.gpu_count) ;
}
#define GB_GPU_DEVICE_CHECK(error) \
if (device < 0 || device >= GB_Global.gpu_count) return (error) ;
size_t GB_Global_gpu_memorysize_get (int device)
{
// get the memory size of a specific GPU
GB_GPU_DEVICE_CHECK (0) ; // memory size zero if invalid GPU
return (GB_Global.gpu_properties [device].total_global_memory) ;
}
int GB_Global_gpu_sm_get (int device)
{
// get the # of SMs in a specific GPU
GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU
return (GB_Global.gpu_properties [device].number_of_sms) ;
}
bool GB_Global_gpu_device_pool_size_set (int device, size_t size)
{
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU
GB_Global.gpu_properties [device].pool_size = (int) size ;
return (true) ;
}
bool GB_Global_gpu_device_max_pool_size_set (int device, size_t size)
{
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU
GB_Global.gpu_properties [device].max_pool_size = (int) size ;
return (true) ;
}
bool GB_Global_gpu_device_memory_resource_set (int device, void *resource)
{
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU
GB_Global.gpu_properties [device].memory_resource = resource ;
return (true) ;
}
void* GB_Global_gpu_device_memory_resource_get( int device )
{
GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU
return ( GB_Global.gpu_properties [device].memory_resource ) ;
//NOTE: this returns a void*, needs to be cast to be used
}
bool GB_Global_gpu_device_properties_get (int device)
{
// get all properties of a specific GPU;
// this function is only called once per GPU, by GB_init.
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU
#if defined ( GBCUDA )
return (GB_cuda_get_device_properties (device,
&(GB_Global.gpu_properties [device]))) ;
#else
// if no GPUs exist, they cannot be queried
return (false) ;
#endif
}
//------------------------------------------------------------------------------
// timing: for code development only
//------------------------------------------------------------------------------
GB_PUBLIC
void GB_Global_timing_clear_all (void)
{
for (int k = 0 ; k < 40 ; k++)
{
GB_Global.timing [k] = 0 ;
}
}
GB_PUBLIC
void GB_Global_timing_clear (int k)
{
GB_Global.timing [k] = 0 ;
}
GB_PUBLIC
void GB_Global_timing_set (int k, double t)
{
GB_Global.timing [k] = t ;
}
GB_PUBLIC
void GB_Global_timing_add (int k, double t)
{
GB_Global.timing [k] += t ;
}
GB_PUBLIC
double GB_Global_timing_get (int k)
{
return (GB_Global.timing [k]) ;
}
//------------------------------------------------------------------------------
// free_pool: fast access to free memory blocks
//------------------------------------------------------------------------------
// each free block contains a pointer to the next free block. This requires
// the free block to be at least 8 bytes in size.
#define GB_NEXT(p) ((void **) p) [0]
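// Illustrative sketch (ours, not part of GraphBLAS): GB_NEXT threads an
// intrusive singly-linked list through the freed blocks themselves, so no
// separate node structure is needed. Compiled out on purpose:
#if 0
static void demo_push (void **head, void *block) // block holds >= 8 bytes
{
GB_NEXT (block) = *head ; // store the old head inside the block itself
*head = block ;
}
static void *demo_pop (void **head)
{
void *block = *head ;
if (block != NULL) *head = GB_NEXT (block) ;
return (block) ;
}
#endif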
// free_pool_init: initialize the free_pool
GB_PUBLIC
void GB_Global_free_pool_init (bool clear)
{
#ifdef _OPENMP
#pragma omp critical(GB_free_pool)
{
if (clear)
{
// clear the free pool
for (int k = 0 ; k < 64 ; k++)
{
GB_Global.free_pool [k] = NULL ;
GB_Global.free_pool_nblocks [k] = 0 ;
}
}
// set the default free_pool_limit
for (int k = 0 ; k < 64 ; k++)
{
GB_Global.free_pool_limit [k] = 0 ;
}
int64_t n = 16384 ;
for (int k = 3 ; k <= 8 ; k++)
{
GB_Global.free_pool_limit [k] = n ;
}
for (int k = 9 ; k <= 19 ; k++)
{
n = n/2 ;
GB_Global.free_pool_limit [k] = n ;
}
}
#else
// OpenMP not available: disable the free pool
for (int k = 0 ; k < 64 ; k++)
{
GB_Global.free_pool [k] = NULL ;
GB_Global.free_pool_nblocks [k] = 0 ;
}
#endif
}
#ifdef GB_DEBUG
// check if a block is valid
static inline void GB_Global_free_pool_check (void *p, int k, char *where)
{
// check the size of the block
ASSERT (k >= 3 && k < 64) ;
ASSERT (p != NULL) ;
size_t size = GB_Global_memtable_size (p) ;
ASSERT (size == ((size_t) 1) << k) ;
}
#endif
// free_pool_get: get a block from the free_pool, or return NULL if none
GB_PUBLIC
void *GB_Global_free_pool_get (int k)
{
#ifdef _OPENMP
void *p = NULL ;
ASSERT (k >= 3 && k < 64) ;
#pragma omp critical(GB_free_pool)
{
p = GB_Global.free_pool [k] ;
if (p != NULL)
{
// remove the block from the kth free_pool
GB_Global.free_pool_nblocks [k]-- ;
GB_Global.free_pool [k] = GB_NEXT (p) ;
}
}
if (p != NULL)
{
// a block was removed from the pool; in debug builds, check that its
// recorded size matches the pool it came from
#ifdef GB_DEBUG
GB_Global_free_pool_check (p, k, "get") ;
#endif
}
return (p) ;
#else
// OpenMP not available: free pool not in use
return (NULL) ;
#endif
}
// free_pool_put: put a block in the free_pool, unless it is full
GB_PUBLIC
bool GB_Global_free_pool_put (void *p, int k)
{
#ifdef _OPENMP
#ifdef GB_DEBUG
GB_Global_free_pool_check (p, k, "put") ;
#endif
bool returned_to_pool = false ;
#pragma omp critical(GB_free_pool)
{
returned_to_pool =
(GB_Global.free_pool_nblocks [k] <
GB_Global.free_pool_limit [k]) ;
if (returned_to_pool)
{
// add the block to the head of the free_pool list
GB_Global.free_pool_nblocks [k]++ ;
GB_NEXT (p) = GB_Global.free_pool [k] ;
GB_Global.free_pool [k] = p ;
}
}
return (returned_to_pool) ;
#else
return (false) ;
#endif
}
// free_pool_dump: check the validity of the free_pool
GB_PUBLIC
void GB_Global_free_pool_dump (int pr)
{
#ifdef _OPENMP
#ifdef GB_DEBUG
bool fail = false ;
#pragma omp critical(GB_free_pool)
{
for (int k = 0 ; k < 64 && !fail ; k++)
{
int64_t nblocks = GB_Global.free_pool_nblocks [k] ;
int64_t limit = GB_Global.free_pool_limit [k] ;
if (nblocks != 0 && pr > 0)
{
printf ("pool %2d: " GBd " blocks, " GBd " limit\n",
k, nblocks, limit) ;
}
int64_t nblocks_actual = 0 ;
void *p = GB_Global.free_pool [k] ;
for ( ; p != NULL && !fail ; p = GB_NEXT (p))
{
if (pr > 1) printf (" %16p ", p) ;
size_t size = GB_Global_memtable_size (p) ;
if (pr > 1) printf ("size: %ld\n", size) ;
nblocks_actual++ ;
fail = fail || (size != ((size_t) 1) << k) ;
if (fail && pr > 0) printf (" fail\n") ;
fail = fail || (nblocks_actual > nblocks) ;
}
if (nblocks_actual != nblocks)
{
if (pr > 0) printf ("fail: # blocks " GBd " " GBd " \n",
nblocks_actual, nblocks) ;
fail = true ;
}
}
}
ASSERT (!fail) ;
#endif
#endif
}
// free_pool_limit_get: get the limit on the # of blocks in the kth pool
GB_PUBLIC
int64_t GB_Global_free_pool_limit_get (int k)
{
#ifdef _OPENMP
int64_t nblocks = 0 ;
if (k >= 3 && k < 64)
{
#pragma omp critical(GB_free_pool)
{
nblocks = GB_Global.free_pool_limit [k] ;
}
}
return (nblocks) ;
#else
return (0) ;
#endif
}
// free_pool_limit_set: set the limit on the # of blocks in the kth pool
GB_PUBLIC
void GB_Global_free_pool_limit_set (int k, int64_t nblocks)
{
if (k >= 3 && k < 64)
{
#ifdef _OPENMP
#pragma omp critical(GB_free_pool)
{
GB_Global.free_pool_limit [k] = nblocks ;
}
#else
{
GB_Global.free_pool_limit [k] = 0 ;
}
#endif
}
}
// free_pool_nblocks_total: total # of blocks in free_pool (for debug only)
GB_PUBLIC
int64_t GB_Global_free_pool_nblocks_total (void)
{
int64_t nblocks = 0 ;
#ifdef _OPENMP
#pragma omp critical(GB_free_pool)
{
for (int k = 0 ; k < 64 ; k++)
{
nblocks += GB_Global.free_pool_nblocks [k] ;
}
}
#endif
return (nblocks) ;
}
//------------------------------------------------------------------------------
// get_wtime: return current wallclock time
//------------------------------------------------------------------------------
GB_PUBLIC
double GB_Global_get_wtime (void)
{
return (GB_OPENMP_GET_WTIME) ;
}
|
csr_spmv.c | /*
* Sparse matrix-vector multiplication for matrices in the compressed
* sparse row (CSR) storage format.
*/
#include "mmio.h"
/* Include redundant execution header. */
#include "../../../include/ourRMTlib.h"
#include <errno.h>
#include <omp.h>     /* omp_get_thread_num(), omp_get_num_threads() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/**
 * `csr_matrix_from_matrix_market()` converts a matrix from the
 * coordinate (COO) format used by the Matrix Market file format
 * to a sparse matrix in the compressed sparse row (CSR)
 * storage format.
*/
int csr_matrix_from_matrix_market(
int num_rows,
int num_columns,
int num_nonzeros,
const int * unsorted_row_indices,
const int * unsorted_column_indices,
const double * unsorted_values,
int ** out_row_ptr,
int ** out_column_indices,
double ** out_values)
{
/* Allocate storage for row pointers. */
int * row_ptr = (int *) malloc((num_rows+1) * sizeof(int));
if (!row_ptr) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
return errno;
}
/* Allocate storage for the column indices of each non-zero. */
int * column_indices = (int *) malloc(num_nonzeros * sizeof(int));
if (!column_indices) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(row_ptr);
return errno;
}
/* Allocate storage for the value of each non-zero. */
double * values = (double *) malloc(num_nonzeros * sizeof(double));
if (!values) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(row_ptr);
free(column_indices);
return errno;
}
/* Initialise the allocated arrays with zeros. */
#pragma omp parallel for
for (int i = 0; i <= num_rows; i++)
row_ptr[i] = 0;
#pragma omp parallel for
for (int k = 0; k < num_nonzeros; k++) {
column_indices[k] = 0;
values[k] = 0;
}
/* Count the number of non-zeros in each row. */
for (int k = 0; k < num_nonzeros; k++)
row_ptr[unsorted_row_indices[k]+1]++;
for (int i = 1; i <= num_rows; i++)
row_ptr[i] += row_ptr[i-1];
/* Sort column indices and non-zero values by their rows. */
for (int k = 0; k < num_nonzeros; k++) {
int i = unsorted_row_indices[k];
column_indices[row_ptr[i]] = unsorted_column_indices[k];
values[row_ptr[i]] = unsorted_values[k];
row_ptr[i]++;
}
/* Adjust the row pointers after sorting. */
for (int i = num_rows; i > 0; i--)
row_ptr[i] = row_ptr[i-1];
row_ptr[0] = 0;
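    /*
     * Worked example (editorial sketch): for a 2x2 matrix with non-zeros
     * (0,1), (1,0), (1,1), the passes above evolve row_ptr as follows:
     *   counting:        row_ptr = {0, 1, 2}
     *   prefix sums:     row_ptr = {0, 1, 3}
     *   after scatter:   row_ptr = {1, 3, 3}  (each entry advanced past its row)
     *   shift and reset: row_ptr = {0, 1, 3}  (restored to row start offsets)
     */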
/*
* Sort the non-zeros within each row by their column indices.
* Here, a simple insertion sort algorithm is used.
*/
#pragma omp parallel for
for (int i = 0; i < num_rows; i++) {
int num_nonzeros = row_ptr[i+1] - row_ptr[i];
for (int k = 0; k < num_nonzeros; k++) {
int column_index = column_indices[row_ptr[i]+k];
double value = values[row_ptr[i]+k];
int j = k-1;
while (j >= 0 && column_indices[row_ptr[i]+j] > column_index) {
column_indices[row_ptr[i]+j+1] = column_indices[row_ptr[i]+j];
values[row_ptr[i]+j+1] = values[row_ptr[i]+j];
j--;
}
column_indices[row_ptr[i]+j+1] = column_index;
values[row_ptr[i]+j+1] = value;
}
}
*out_column_indices = column_indices;
*out_row_ptr = row_ptr;
*out_values = values;
return 0;
}
/**
* `csr_matrix_spmv()` computes the multiplication of a sparse matrix
* in the compressed sparse row (CSR) format with a dense vector,
* referred to as the source vector, to produce another dense vector,
* called the destination vector.
*/
int csr_matrix_spmv(
int num_rows,
int num_columns,
int num_nonzeros,
const int * row_ptr,
const int * column_indices,
const double * values,
const double * x,
double * y)
{
    int check_threads = 0;
#pragma omp parallel for
    for (int i = 0; i < num_rows; i++) {
        int ID = omp_get_thread_num();
        /* Report the thread count once per call; only thread 0 reads and
         * writes check_threads, so no synchronisation is needed. */
        if (ID == 0 && check_threads == 0) {
            int nthreads = omp_get_num_threads();
            fprintf(stderr, "matrix_multiply: Number of threads = %d\n", nthreads);
            check_threads = 1;
        }
double z = 0.0;
for (int k = row_ptr[i]; k < row_ptr[i+1]; k++)
z += values[k] * x[column_indices[k]];
y[i] += z;
}
return 0;
}
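/*
 * Worked example (editorial sketch): the 2x2 matrix [[1 2],[0 3]] in CSR is
 *   row_ptr        = {0, 2, 3}
 *   column_indices = {0, 1, 1}
 *   values         = {1, 2, 3}
 * so with x = {1, 1}, the loop above accumulates y += {1*1 + 2*1, 3*1},
 * i.e. y += {3, 3}.
 */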
/**
* `timespec_duration()` is the duration, in seconds, elapsed between
* two given time points.
*/
static double timespec_duration(
struct timespec t0,
struct timespec t1)
{
return (t1.tv_sec - t0.tv_sec) +
(t1.tv_nsec - t0.tv_nsec) * 1e-9;
}
int main(int argc, char * argv[])
{
    int err;
    int num_runs = 100;
    if (argc < 3) {
        fprintf(stderr, "Usage: %s FILE REPEAT\n", argv[0]);
        return EXIT_FAILURE;
    }
    num_runs = atoi(argv[2]);
    if (num_runs <= 0) {
        fprintf(stderr, "%s: REPEAT must be a positive integer\n", argv[0]);
        return EXIT_FAILURE;
    }
start_timer();
/* Read a matrix from a file in the matrix market format. */
int num_rows;
int num_columns;
int num_nonzeros;
int * unsorted_row_indices;
int * unsorted_column_indices;
double * unsorted_values;
err = mm_read_unsymmetric_sparse(
argv[1], &num_rows, &num_columns, &num_nonzeros,
&unsorted_values, &unsorted_row_indices, &unsorted_column_indices);
if (err)
return EXIT_FAILURE;
/* Convert to a compressed sparse row format. */
int * row_ptr;
int * column_indices;
double * values;
err = csr_matrix_from_matrix_market(
num_rows, num_columns, num_nonzeros,
unsorted_row_indices, unsorted_column_indices, unsorted_values,
&row_ptr, &column_indices, &values);
if (err) {
free(unsorted_values);
free(unsorted_column_indices);
free(unsorted_row_indices);
return EXIT_FAILURE;
}
free(unsorted_values);
free(unsorted_column_indices);
free(unsorted_row_indices);
    /* Generate a dense source vector (all ones) to use in the
     * matrix-vector multiplication. */
double * x = (double *) malloc(num_columns * sizeof(double));
if (!x) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(values);
free(row_ptr);
free(column_indices);
return EXIT_FAILURE;
}
#pragma omp parallel for
for (int j = 0; j < num_columns; j++)
x[j] = 1.;
/* Allocate storage for a destination vector for a matrix-vector
* multiplication. */
double * y = (double *) malloc(num_rows * sizeof(double));
if (!y) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(x);
free(values);
free(row_ptr);
free(column_indices);
return EXIT_FAILURE;
}
#pragma omp parallel for
for (int i = 0; i < num_rows; i++){
y[i] = 0.;
}
fprintf(stdout, "num_rows:%d num_columns:%d num_nonzeros:%d Repeat:%d\n", num_rows,num_columns,num_nonzeros, num_runs);
//#pragma omp parallel
// {
/*
struct timespec t0, t1;
#pragma omp master
clock_gettime(CLOCK_MONOTONIC, &t0);*/
for (int i = 0; i < num_runs; i++) {
# ifdef ENABLE_RMT
activateRMT("f-L-i-i-i-1i-1i-1d-1d-1dC", &csr_matrix_spmv, 8, num_rows, num_columns, num_nonzeros,
row_ptr, (num_rows+1),
column_indices, num_nonzeros,
values, num_nonzeros,
x, num_columns,
y, num_rows);
# else
/* Compute the sparse matrix-vector multiplication. */
csr_matrix_spmv(
num_rows, num_columns, num_nonzeros,
row_ptr, column_indices, values, x, y);
# endif
}
/*
#pragma omp master
{
clock_gettime(CLOCK_MONOTONIC, &t1);
fprintf(stderr, "Time: %.3f\n",
timespec_duration(t0, t1) / num_runs);
}*/
//}
//#if 0
/* Write the results to standard output. */
    for (int i = 0; i < num_rows; i++)
fprintf(stdout, "%12g\n", y[i]);
//#endif
end_timer();
free(y);
free(x);
free(values);
free(column_indices);
free(row_ptr);
return EXIT_SUCCESS;
}
|
GB_unaryop__minv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_bool
// op(A') function: GB_tran__minv_bool_bool
// C type: bool
// A type: bool
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_bool_bool
(
bool *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
flags.c | #include <stdio.h>
int arr[100];
int nt = 12;
int main()
{
#pragma omp target teams distribute parallel for num_threads(nt)
    for (int i = 0; i < 100; i++)
        arr[i] = i;
    // Verify
    int errors = 0;
    for (int i = 0; i < 100; i++) {
        if (arr[i] != i)
            errors++;
    }
    if (!errors) {
        fprintf(stderr, "Success\n");
        return 0;
    } else {
        fprintf(stderr, "Failed\nErrors: %d\n", errors);
        return 1;
    }
}
|
train2.c | #define _GNU_SOURCE
#include <syscall.h>
#include <sched.h>
#include "graph.h"
#include "mainFunctions.h"
#include "powerperformacetracking.h"
#include "print.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>  /* gettimeofday, struct timeval; may also come via the project headers above */
#include <omp.h>       /* omp_get_thread_num; may also come via the project headers above */
#include <unistd.h>
#define NO_OF_ARGS 2
#define REPEAT 25
long long iters[8];
struct timeval start, end;
// We define all additional parameters here
void setaffinity() {
/* #pragma omp parallel
{
cpu_set_t newcpu;
int threadid = omp_get_thread_num();
CPU_ZERO(&newcpu);
CPU_SET ( threadid , &newcpu) ;
int __t = sched_setaffinity ( syscall ( SYS_gettid ) , sizeof ( newcpu ) , &newcpu ) ;
assert(__t == 0);
}
*/
}
void updateMultipleArrayPerIteration(graph *G, int id) {
printf("The update multiple Array %d \n", id);
    node_t * G_member = (node_t *) malloc (G->numNodes * sizeof(node_t));
srand(0);
int i;
for(i = 0; i< G->numNodes; i++) {
G_member[i] = rand() % G->numNodes;
}
char title[50];
sprintf(title, "multiple_%d.csv",id);
gettimeofday(&start, NULL);
inittracking(title);
int tShared = 0;
for(int abc=0; abc < REPEAT; abc ++) {
#pragma omp parallel
{
int threadid = omp_get_thread_num();
// iters[threadid] = 0;
int t = 0;
#pragma omp for schedule(dynamic, 1024)
for (node_t u1 = 0; u1 < G->numNodes; u1 ++)
for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) {
// iters[threadid]++;
node_t j = G->node_idx [j_idx];
for (edge_t k_idx = G->begin[j];k_idx < G->begin[j+1] ; k_idx ++) {
                        // note: G_member[j] is loop-invariant here, so every
                        // k_idx iteration repeats the same read; the walk
                        // mainly exercises loop and memory behaviour
                        node_t k = G_member[j];
if(k > G->numNodes/2) {
t++;
}
}
node_t j_comp = ( G->numNodes - (j+1));
for (edge_t k_idx = G->begin[j_comp];k_idx < G->begin[j_comp+1] ; k_idx ++) {
node_t k = G_member[j];
if(k > G ->numNodes/2) {
t++;
}
}
}
//printf("dummy %d \n",t);
//printf("The num iters thread id %d = %lld \n", threadid, iters[threadid]);
#pragma omp atomic
tShared += t;
}
tShared /= 1000;
}
printf("TSHared %d\n", tShared);
endtracking();
gettimeofday(&end, NULL);
printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000));
free(G_member);
}
#define numTimes 7
int runalgo(int argc,char** argv) {
int i;
//setaffinity();
graph* G = readGraph(argv[1], argv[2]);
for(i = 0;i< numTimes; i++) {
printf("Run %d \n", i);
updateMultipleArrayPerIteration(G,i);
sleep(2);
}
return 0;
}
inline void kernel(graph *G) {
}
|
GB_unop__identity_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_int32
// op(A') function: GB_unop_tran__identity_fp64_int32
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_fp64_int32
(
double *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_fp64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fib.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <unistd.h>
#define MAX 33 //41
int Fibonacci(int n)
{
    int x, y;
    if (n < 2)
        return n;
    x = Fibonacci(n - 1);
    y = Fibonacci(n - 2);
    return x + y;
}
int FibonacciTask(int n)
{
    int x, y;
    if (n < 2)
        return n;
    /* Tasks are created only at this level; each task computes its
     * subtree with the serial Fibonacci. */
    #pragma omp task shared(x)
    x = Fibonacci(n - 1);
    #pragma omp task shared(y)
    y = Fibonacci(n - 2);
    #pragma omp taskwait
    return x + y;
}
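/*
 * Editorial sketch (not called elsewhere in this file): a fully recursive
 * task version. The cutoff of 20 is an illustrative assumption, not a tuned
 * value; below it the serial Fibonacci avoids the overhead of tiny tasks.
 */
int FibonacciTaskRec(int n)
{
    int x, y;
    if (n < 2)
        return n;
    if (n < 20)                   /* assumed cutoff: go serial */
        return Fibonacci(n);
    #pragma omp task shared(x)
    x = FibonacciTaskRec(n - 1);
    #pragma omp task shared(y)
    y = FibonacciTaskRec(n - 2);
    #pragma omp taskwait
    return x + y;
}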
int main(int argc, char * argv[])
{
    int FibNumber[MAX] = {0};
struct timeval time_start, time_end;
int i = 0;
// openmp related print message
printf("CPU_ONLN= %d\n", sysconf(_SC_NPROCESSORS_ONLN));
printf("Number of CPUs=%d\n", omp_get_num_procs());
printf("Number of max threads=%d\n", omp_get_max_threads());
printf("Number of executing thread=%d\n", omp_get_thread_num());
printf("Number of threads=%d\n", omp_get_num_threads());
omp_set_num_threads( omp_get_num_procs() );
gettimeofday(&time_start, NULL);
    #pragma omp parallel
    {
        #pragma omp single private(i)
        for (i = 1; i < MAX; i++) {
            FibNumber[i] = FibonacciTask(i);
        }
    }
gettimeofday(&time_end, NULL);
time_end.tv_usec = time_end.tv_usec-time_start.tv_usec;
time_end.tv_sec = time_end.tv_sec-time_start.tv_sec;
time_end.tv_usec += (time_end.tv_sec*1000000);
printf("Execution time of The Fibonacci Numbers with OpenMP : %lf sec\n", time_end.tv_usec / 1000000.0);
for(i = 0; i < MAX; i++)
printf("%d ", FibNumber[i]);
printf("\n-------------------------------\n");
return 0;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file utils.h
 * \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/dnnl/dnnl_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() {
return ::GetCurrentProcessId();
}
#else
inline size_t current_process_id() {
return getpid();
}
#endif
/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with a value equal to the size of indices.
*/
struct csr_indptr_check {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* indptr,
const nnvm::dim_t end,
const nnvm::dim_t idx_size) {
if (indptr[i + 1] < 0 || indptr[i + 1] < indptr[i] || (i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* idx,
const RType* indptr,
const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i + 1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i + 1] - 1 && idx[j] >= idx[j + 1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* idx,
const nnvm::dim_t end,
const nnvm::dim_t nrows) {
if ((i < end && idx[i + 1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template <typename xpu>
void CheckFormatWrapper(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatCSRImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray";
const mxnet::TShape shape = input.shape();
const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
const mxnet::TShape storage_shape = input.storage_shape();
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s,
indptr_shape[0] - 1,
val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1,
idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s,
indptr_shape[0] - 1,
val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatRSPImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s,
idx_shape[0],
val_xpu.dptr<DType>(),
input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1,
input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
template <typename xpu>
void CheckFormatImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
int stype = input.storage_type();
if (stype == kCSRStorage) {
CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kRowSparseStorage) {
CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kDefaultStorage) {
// no-op for default storage
} else {
LOG(FATAL) << "Unknown storage type " << stype;
}
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template <typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu>* s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/*!
 * \brief Casts tensor storage type to the new type.
 */
template <typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype)
return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2`. Sets the boolean if both are found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) {
if (!ndstypes.empty()) {
for (const auto& ndstype : ndstypes) {
if (ndstype == stype) {
return true;
}
}
}
return false;
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
return "unknown";
}
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
const std::string& attr_name,
std::string default_val = "") {
if (attrs.dict.find(attr_name) == attrs.dict.end()) {
return default_val;
}
return attrs.dict.at(attr_name);
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
os << "operator = " << attrs.op->name << "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
std::string result = "";
std::vector<int> in_stypes;
std::vector<int> out_stypes;
in_stypes.reserve(inputs.size());
out_stypes.reserve(outputs.size());
auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
auto log_store = LogStore::Get();
if (log_store->find(message) == log_store->end()) {
LOG(INFO) << message;
log_store->insert(message);
}
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log)
return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning =
"\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
if (!DNNLEnvSet())
common::LogOnce(
"MXNET_ONEDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_ONEDNN_ENABLED=1");
if (GetDNNLCacheSize() != -1)
common::LogOnce(
"MXNET_ONEDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// heuristic to determine the number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is the resource-efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get the number of matching colors.
// This decides how much parallelism we can get on each GPU.
inline int GetExecNumMatchColor() {
  // This is the resource-efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadsPerGPU());
}
template <typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
V sum = start;
#pragma omp parallel for reduction(+ : sum)
for (int i = 0; i < n; ++i) {
sum += a[i];
}
return sum;
}
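/*
 * Example (editorial sketch): summing an array with the OpenMP reduction
 * above.
 *   double a[4] = {1.0, 2.0, 3.0, 4.0};
 *   double s = ParallelAccumulate(a, 4, 0.0);  // s == 10.0
 */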
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
if (len < grainsize) {
std::sort(first, first + len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len / 2, grainsize, comp);
ParallelSortHelper(first + len / 2, len - len / 2, grainsize, comp);
thr.join();
std::inplace_merge(first, first + len / 2, first + len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided in two, and two threads will be
 * assigned to sort the two halves.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024 * 16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided in two, and two threads will be
 * assigned to sort the two halves.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(
first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
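/*!
 * Example (editorial sketch): sorting a vector in ascending order with up
 * to four threads.
 * \code
 *   std::vector<float> v = {3.f, 1.f, 2.f};
 *   ParallelSort(v.begin(), v.end(), 4);  // v becomes {1.f, 2.f, 3.f}
 * \endcode
 */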
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
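/*
 * Examples (editorial sketch):
 *   auto obj = MakeUnique<std::pair<int, int>>(1, 2);  // single object
 *   auto buf = MakeUnique<double[]>(16);  // 16 value-initialized doubles
 */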
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
 * Construction of arrays of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
template <typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
return nullptr;
}
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*/
template <typename T>
constexpr size_t MaxIntegerValue() {
return std::is_integral<T>::value ? std::numeric_limits<T>::max()
: size_t(2) << (std::numeric_limits<T>::digits - 1);
}
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
return size_t(2) << 10;
}
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
return size_t(2) << 14;
}
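// (editorial note) for float, std::numeric_limits<float>::digits == 24, so
// MaxIntegerValue<float>() == size_t(2) << 23 == 2^24 == 16777216: every
// integer in [0, 2^24] is exactly representable, but 2^24 + 1 is not.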
MSHADOW_XINLINE int ilog2ul(size_t a) {
int k = 1;
while (a >>= 1)
++k;
return k;
}
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
int k = 1;
while (a >>= 1)
++k;
return k;
}
/*!
* \brief Return an NDArray of all zeros.
*/
inline NDArray InitZeros(const NDArrayStorageType stype,
const mxnet::TShape& shape,
const Context& ctx,
const int dtype) {
// NDArray with default storage
if (stype == kDefaultStorage) {
NDArray ret(shape, ctx, false, dtype);
ret = 0;
return ret;
}
// NDArray with non-default storage. Storage allocation is always delayed.
return NDArray(stype, shape, ctx, true, dtype);
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype,
const mxnet::TShape& shape,
const Context& ctx,
const int dtype,
std::vector<NDArray>* vec) {
// NDArray with default storage
if (stype == kDefaultStorage) {
vec->emplace_back(shape, ctx, false, dtype);
vec->back() = 0;
} else {
// NDArray with non-default storage. Storage allocation is always delayed.
vec->emplace_back(stype, shape, ctx, true, dtype);
}
}
/*!
* \brief parallelize copy by OpenMP.
*/
template <typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
}
}
/*!
 * \brief parallelize add by OpenMP
*/
template <typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
if (shape->ndim() == 0) { // legacy shape ndim = 0 means unknown
*shape = mxnet::TShape(); // unknown shape ndim = -1
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if ((*shape)[j] == 0) { // legacy shape dim_size = 0 means unknown
(*shape)[j] = -1; // unknown dim size = -1
}
}
}
}
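// Example (editorial sketch): a legacy shape (2, 0, 3) becomes (2, -1, 3)
// under the numpy definition, and a legacy 0-ndim (fully unknown) shape
// becomes an unknown shape with ndim == -1.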
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToNumpyShape(&(shapes->at(i)));
}
}
/*!
 * \brief This function is used to convert shapes returned by
* the infer shape functions/pass to the legacy shape definition.
*/
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
if (!mxnet::ndim_is_known(*shape)) {
*shape = mxnet::TShape(0, -1);
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if (!mxnet::dim_size_is_known(*shape, j)) {
(*shape)[j] = 0;
}
}
}
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToLegacyShape(&(shapes->at(i)));
}
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
// convert negative axes to positive values
const int ndim = src.ndim();
mxnet::TShape axes = src;
for (int i = 0; i < ndim; ++i) {
if (axes[i] < 0) {
axes[i] += ndim;
}
CHECK(axes[i] >= 0 && axes[i] < ndim)
<< "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")";
}
return axes;
}
inline bool is_float(const int dtype) {
return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16;
}
inline bool is_int(const int dtype) {
return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kInt32 ||
dtype == mshadow::kInt64;
}
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2)
return type1;
if (is_float(type1) && is_float(type2)) {
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
return is_float(type1) ? type1 : type2;
}
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
inline int np_binary_out_infer_type(const int type1, const int type2) {
if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
return mshadow::kInt32;
}
return get_more_precise_type(type1, type2);
}
inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
// obtain the profiler scope name, if assigned previously
std::string profiler_scope = MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
const std::unordered_map<std::string, std::string>& node_attrs_dict = attrs.dict;
const std::unordered_map<std::string, std::string>::const_iterator profiler_scope_iter =
node_attrs_dict.find("__profiler_scope__");
if (profiler_scope_iter != node_attrs_dict.end()) {
profiler_scope = profiler_scope_iter->second;
}
return profiler_scope;
}
inline int GetDefaultDtype() {
return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
inline int GetDefaultDtype(int dtype) {
if (dtype != -1)
return dtype;
return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
struct MShadowTypeInfo {
std::string name;
int size;
int acc_size;
  // take name by value (non-const) so std::move can actually move it
  MShadowTypeInfo(std::string name, const int size, const int acc_size)
      : name(std::move(name)), size(size), acc_size(acc_size) {}
  MShadowTypeInfo(std::string name, const int size)
      : MShadowTypeInfo(std::move(name), size, size) {}
};
MShadowTypeInfo mshadow_type_info(const int type_flag);
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#if _MSC_VER
*ptr = _aligned_malloc(size, alignment);
if (*ptr == nullptr)
return false;
#else
int res = posix_memalign(ptr, alignment, size);
if (res != 0)
return false;
#endif
return true;
}
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
_aligned_free(ptr);
#else
free(ptr);
#endif
}
inline index_t div_round(const index_t a, const index_t b) {
return (a + b - 1) / b;
}
inline bool IsPower2(size_t N) {
return ((N & (N - 1)) == 0) && N != 0;
}
inline size_t RoundToPower2(size_t N) {
size_t ret = 1;
size_t copyN = N;
while (N >= 2) {
ret *= 2;
N /= 2;
}
if (ret < copyN) {
ret *= 2;
}
return ret;
}
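// Examples (editorial note): div_round(7, 3) == 3 (ceiling division),
// IsPower2(8) == true, IsPower2(0) == false, and for N >= 1,
// RoundToPower2(N) is the smallest power of two >= N, e.g.
// RoundToPower2(5) == 8.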
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
GB_binop.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB)
// A.*B function (eWiseMult): GB (_AemultB_08)
// A.*B function (eWiseMult): GB (_AemultB_02)
// A.*B function (eWiseMult): GB (_AemultB_04)
// A.*B function (eWiseMult): GB (_AemultB_bitmap)
// A*D function (colscale): GB (_AxD)
// D*A function (rowscale): GB (_DxB)
// C+=B function (dense accum): GB (_Cdense_accumB)
// C+=b function (dense accum): GB (_Cdense_accumb)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum)
// C=scalar+B GB (_bind1st)
// C=scalar+B' GB (_bind1st_tran)
// C=A+scalar GB (_bind2nd)
// C=A'+scalar GB (_bind2nd_tran)
// C type: GB_ctype
// A type: GB_atype
// A pattern? GB_a_is_pattern
// B type: GB_btype
// B pattern? GB_b_is_pattern
// BinaryOp: GB_binaryop(cij,aij,bij,i,j)
#define GB_ATYPE \
GB_atype
#define GB_BTYPE \
GB_btype
#define GB_CTYPE \
GB_ctype
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
GB_atype_is_btype
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
GB_ctype_is_atype
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
GB_ctype_is_btype
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GB_geta(aij,Ax,pA,A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    GB_a_is_pattern
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GB_getb(bij,Bx,pB,B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    GB_b_is_pattern
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GB_ctype t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
GB_copy_a_to_c(cij,Ax,pA,A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
GB_copy_b_to_c(cij,Bx,pB,B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
GB_binaryop(z,x,y,i,j) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
GB_binaryop_flip
// op is second
#define GB_OP_IS_SECOND \
GB_op_is_second
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
GB_disable
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
if_is_binop_subset
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
endif_is_binop_subset
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
if_C_dense_update
{
#include "GB_dense_subassign_23_template.c"
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
if_C_dense_update
{
// get the scalar b for C += b, of type GB_btype
GB_btype bwork = (*((GB_btype *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
GrB_Info GB (_AxD)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
GrB_Info GB (_DxB)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GB_atype alpha_scalar ;
GB_btype beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GB_atype *) alpha_scalar_in)) ;
beta_scalar = (*((GB_btype *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
GrB_Info GB (_AemultB_08)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
GrB_Info GB (_AemultB_02)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
GrB_Info GB (_AemultB_04)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
GrB_Info GB (_AemultB_bitmap)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
GrB_Info GB (_bind1st)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype x = (*((GB_atype *) x_input)) ;
GB_btype *Bx = (GB_btype *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GB_getb(bij, Bx, p, false) ;
GB_binaryop(Cx [p], x, bij, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
GrB_Info GB (_bind2nd)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype *Ax = (GB_atype *) Ax_input ;
GB_btype y = (*((GB_btype *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GB_geta(aij, Ax, p, false) ;
GB_binaryop(Cx [p], aij, y, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
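// To illustrate the two bind kernels above with a non-commutative operator
// such as f(x,y) = x-y: _bind1st computes Cx [p] = x - Bx [p] (the scalar is
// bound to the first argument), while _bind2nd computes Cx [p] = Ax [p] - y
// (the scalar is bound to the second).  Entries absent from the bitmap (Bb
// or Ab) are skipped in both kernels.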
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_getb(aij, Ax, pA, false) ; \
GB_binaryop(Cx [pC], x, aij, 0, 0) ; \
}
GrB_Info GB (_bind1st_tran)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GB_btype
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_atype x = (*((const GB_atype *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GB_atype
}
endif_binop_bind_is_enabled
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_geta(aij, Ax, pA, false) ; \
GB_binaryop(Cx [pC], aij, y, 0, 0) ; \
}
GrB_Info GB (_bind2nd_tran)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_btype y = (*((const GB_btype *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
#endif
|
fdtd2d.c | /**
* fdtd2d.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif defined(RUN_BENCHMARK)
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define tmax 500
#define NX SIZE
#define NY SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
DATA_TYPE *hz) {
int i, j;
for (i = 0; i < tmax; i++) {
_fict_[i] = (DATA_TYPE)i;
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
ex[i * NY + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX;
ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX;
hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
}
}
}
void init_array_hz(DATA_TYPE *hz) {
int i, j;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
}
}
}
int compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) {
int i, j, fail;
fail = 0;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
if (percentDiff(hz1[i * NY + j], hz2[i * NY + j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
return fail;
}
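/* Reference CPU kernel.  Each time step applies the standard 2-D FDTD
   (Yee-style) update sweeps:
     ey(i,j) -= 0.5 * (hz(i,j) - hz(i-1,j))
     ex(i,j) -= 0.5 * (hz(i,j) - hz(i,j-1))
     hz(i,j) -= 0.7 * (ex(i,j+1) - ex(i,j) + ey(i+1,j) - ey(i,j))
   with row 0 of ey driven by the source term _fict_[t]. */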
void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) {
int t, i, j;
for (t = 0; t < tmax; t++) {
for (j = 0; j < NY; j++) {
ey[0 * NY + j] = _fict_[t];
}
for (i = 1; i < NX; i++) {
for (j = 0; j < NY; j++) {
ey[i * NY + j] =
ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
for (i = 0; i < NX; i++) {
for (j = 1; j < NY; j++) {
ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
hz[i * NY + j] =
hz[i * NY + j] -
0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
}
}
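/* Offloaded variant.  The arrays are mapped to the device once, outside the
   time loop; each of the three sweeps is its own target region because every
   sweep reads values produced by the previous one in the same time step, so
   the sweeps cannot be fused into a single kernel. */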
void runFdtd_OMP(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
DATA_TYPE *hz) {
int t, i, j;
#pragma omp target data map(to : _fict_[ : tmax], ex[ : (NX *(NY + 1))], ey[ : ((NX + 1) * NY)]) map(tofrom : hz[ : (NX * NY)]) device(DEVICE_ID)
{
for (t = 0; t < tmax; t++) {
#pragma omp target teams distribute parallel for device(DEVICE_ID)
for (j = 0; j < NY; j++) {
ey[0 * NY + j] = _fict_[t];
}
#pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID)
for (i = 1; i < NX; i++) {
for (j = 0; j < NY; j++) {
ey[i * NY + j] =
ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
#pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID)
for (i = 0; i < NX; i++) {
for (j = 1; j < NY; j++) {
ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
#pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID)
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
hz[i * NY + j] =
hz[i * NY + j] -
0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
}
}
}
int main() {
double t_start, t_end;
int fail = 0;
DATA_TYPE *_fict_;
DATA_TYPE *ex;
DATA_TYPE *ey;
DATA_TYPE *hz;
DATA_TYPE *hz_outputFromGpu;
_fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE));
ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE));
ey = (DATA_TYPE *)malloc((NX + 1) * NY * sizeof(DATA_TYPE));
hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
fprintf(stdout, "<< 2-D Finite Different Time Domain Kernel >>\n");
init_arrays(_fict_, ex, ey, hz);
init_array_hz(hz_outputFromGpu);
t_start = rtclock();
runFdtd_OMP(_fict_, ex, ey, hz_outputFromGpu);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
runFdtd(_fict_, ex, ey, hz);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(hz, hz_outputFromGpu);
#endif
free(_fict_);
free(ex);
free(ey);
free(hz);
free(hz_outputFromGpu);
return fail;
}
|
oyranos_cmm_oyra_image_expose.c | /** @file oyranos_cmm_oyra_image_expose.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2016 (C) Kai-Uwe Behrmann
*
* @brief expose module for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2016/04/11
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
/* OY_IMAGE_EXPOSE_REGISTRATION */
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
void oySensibleClip ( double * c, icColorSpaceSignature sig, int range_max, double expose )
{
int max = 0, max_pos = 0,
mid, mid_pos,
min = range_max, min_pos = 0,
i,
n = oyICCColorSpaceGetChannelCount(sig);
if(sig == icSigLabData ||
sig == icSigYCbCrData)
n = 1;
for(i = 0; i < n; ++i)
{
if(max < c[i]) { max = c[i]; max_pos = i; }
if(min > c[i]) { min = c[i]; min_pos = i; }
}
if( min * expose > range_max)
for(i = 0; i < n; ++i)
c[i] = range_max;
else if(max * expose <= range_max)
for(i = 0; i < n; ++i)
c[i] *= expose;
else if(n > 1)
{
double exposed_min = min * expose;
double mid_part;
double exposed_mid;
mid_pos = min_pos != 0 && max_pos != 0 ? 0 : min_pos != 1 && max_pos != 1 ? 1 : 2;
mid = c[mid_pos];
mid_part = (double)( mid - min )/(double)( max - min );
c[min_pos] = exposed_min + 0.5;
exposed_mid = exposed_min + mid_part * (range_max - exposed_min);
c[mid_pos] = exposed_mid + 0.5;
c[max_pos] = range_max;
}
}
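/* A worked example of the n > 1 branch above, assuming range_max = 255 and
   expose = 2.0 with c = {200, 100, 50}: the minimum 50 exposes to 100, which
   still fits, but the maximum 200 would expose to 400.  The code therefore
   pins c[max_pos] to 255 and places the middle channel proportionally:
   mid_part = (100-50)/(200-50) = 1/3, so c[mid_pos] becomes
   100 + (255-100)/3 + 0.5.  This keeps the channel ordering instead of
   clipping all channels to white. */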
/** @brief implement oyCMMFilter_GetNext_f()
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2013/06/10 (Oyranos: 0.9.5)
*/
int oyraFilter_ImageExposeRun ( oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
int result = 0, error = 0;
oyFilterSocket_s * socket = 0;
oyFilterNode_s * input_node = 0,
* node = 0;
oyFilterPlug_s * plug = 0;
oyImage_s * image = 0;
int dirty = 0;
socket = oyFilterPlug_GetSocket( requestor_plug );
node = oyFilterSocket_GetNode( socket );
image = (oyImage_s*)oyFilterSocket_GetData( socket );
if(!image)
{
result = 1;
goto clean_expose1;
}
if(oy_debug)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );
{
oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
double expose = 1.0;
oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );
if(!node_opts)
dirty = 1;
if(dirty)
{
result = dirty;
goto clean_expose2;
}
plug = oyFilterNode_GetPlug( node, 0 );
/* select node */
input_node = oyFilterNode_GetPlugNode( node, 0 );
/* find filters own expose factor */
error = oyOptions_FindDouble( node_opts,
"//" OY_TYPE_STD "/expose/expose",
0, &expose );
if(error) WARNc2_S("%s %d", _("found issues"),error);
if(oy_debug > 2)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"%s expose: %f",OY_DBG_ARGS_, oyPixelAccess_Show(ticket), expose);
if(expose != 1.0)
{
oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
oyArray2d_s * array_out = oyPixelAccess_GetArray( ticket );
oyProfile_s * p = oyImage_GetProfile( output_image );
icColorSpaceSignature sig = oyProfile_GetSignature( p, oySIGNATURE_COLOR_SPACE );
int layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
int channels_dst = oyToChannels_m( layout_dst );
int byte_swap = oyToByteswap_m( layout_dst );
int ticket_array_pix_width;
/* avoid division by zero */
if(!channels_dst) channels_dst = 1;
ticket_array_pix_width = oyArray2d_GetWidth( array_out ) / channels_dst;
{
int w,h,x,y, i, start_x,start_y;
unsigned int max = 1;
oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
oyRectangle_s_ roi_= {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
oyRectangle_s * roi = (oyRectangle_s*)&roi_;
uint8_t ** array_out_data;
/* get pixel layout infos for copying */
oyDATATYPE_e data_type_out = oyToDataType_m( layout_dst );
int bps_out = oyDataTypeGetSize( data_type_out );
/* get the source pixels */
result = oyFilterNode_Run( input_node, plug, ticket );
/* get the channel buffers */
array_out_data = oyArray2d_GetData( array_out );
w = oyArray2d_GetWidth( array_out ) / channels_dst;
h = oyArray2d_GetHeight( array_out );
oyRectangle_SetByRectangle( roi, ticket_roi );
oyRectangle_Scale( roi, ticket_array_pix_width );
start_x = OY_ROUND(roi_.x);
start_y = OY_ROUND(roi_.y);
switch(data_type_out)
{
case oyUINT8: max = 255; break;
case oyUINT16: max = 65535; break;
case oyUINT32: max = UINT32_MAX; break;
default: break;
}
/* expose the samples */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,y,i)
#endif
for(y = start_y; y < h; ++y)
{
for(x = start_x; x < w; ++x)
{
if( (sig == icSigRgbData ||
sig == icSigXYZData ||
sig == icSigLabData ||
sig == icSigYCbCrData)
&& channels_dst >= 3)
{
double rgb[3], v;
for(i = 0; i < 3; ++i)
{
switch(data_type_out)
{
case oyUINT8:
rgb[i] = array_out_data[y][x*channels_dst*bps_out + i*bps_out];
break;
case oyUINT16:
{
uint16_t v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
if(byte_swap) v = oyByteSwapUInt16(v);
rgb[i] = v;
}
break;
case oyUINT32:
{
uint32_t v = *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
if(byte_swap) v = oyByteSwapUInt32(v);
rgb[i] = v;
}
break;
case oyHALF:
v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
rgb[i] = v;
break;
case oyFLOAT:
v = *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
rgb[i] = v;
break;
case oyDOUBLE:
v = *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
rgb[i] = v;
break;
}
}
oySensibleClip ( rgb, sig, max, expose );
for(i = 0; i < 3; ++i)
{
v = rgb[i];
switch(data_type_out)
{
case oyUINT8:
array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
break;
case oyUINT16:
{ uint16_t u16 = v;
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(u16) : u16;
}
break;
case oyUINT32:
{ uint32_t u32 = v;
*((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(u32) : u32;
}
break;
case oyHALF:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
break;
case oyFLOAT:
*((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
break;
case oyDOUBLE:
*((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
break;
}
}
}
else
for(i = 0; i < channels_dst; ++i)
{
int v;
switch(data_type_out)
{
case oyUINT8:
v = array_out_data[y][x*channels_dst*bps_out + i*bps_out] * expose;
if(v > 255) v = 255;
array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
break;
case oyUINT16:
v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
if(byte_swap) v = oyByteSwapUInt16(v);
v *= expose;
if(v > 65535) v = 65535;
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(v) : v;
break;
case oyUINT32:
*((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
break;
case oyHALF:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
break;
case oyFLOAT:
*((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
break;
case oyDOUBLE:
*((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
break;
}
}
}
}
}
oyArray2d_Release( &array_out );
oyImage_Release( &output_image );
oyProfile_Release( &p );
} else /* expose == 1.0 */
{
result = oyFilterNode_Run( input_node, plug, ticket );
}
clean_expose2:
oyOptions_Release( &node_opts );
oyFilterPlug_Release( &plug );
oyRectangle_Release( &ticket_roi );
oyFilterNode_Release( &input_node );
}
clean_expose1:
oyImage_Release( &image );
oyFilterSocket_Release( &socket );
oyFilterNode_Release( &node );
return result;
}
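/* Note on the two paths above: images with an RGB-like signature and at
   least 3 channels go through oySensibleClip() so that channel ratios
   survive clipping, while all other layouts are scaled per channel, with
   explicit clamping only in the oyUINT8 and oyUINT16 cases; the oyUINT32
   and floating point cases multiply in place without clamping. */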
#define OY_IMAGE_EXPOSE_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "expose"
/** @brief oyra oyCMMapi7_s implementation
*
* a filter providing an expose image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi7ImageExposeCreate(void)
{
oyCMMapi7_s * expose7;
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
oyHALF, oyFLOAT, oyDOUBLE, 0};
oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
* socket = oyConnectorImaging_New(0);
static oyConnectorImaging_s * plugs[2] = {0,0},
* sockets[2] = {0,0};
plugs[0] = plug;
sockets[0] = socket;
oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( plug, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );
oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( socket, 0 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );
expose7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_EXPOSE_REGISTRATION,
cmm_version, module_api,
NULL,
oyraFilter_ImageExposeRun,
(oyConnector_s**)plugs, 1, 0,
(oyConnector_s**)sockets, 1, 0,
0, 0 );
return (oyCMMapi_s*) expose7;
}
const char * oyraApi4UiImageExposeGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select,"name") == 0)
{
if(type == oyNAME_NICK)
return "image_expose";
else if(type == oyNAME_NAME)
return _("Image[expose]");
else if(type == oyNAME_DESCRIPTION)
return _("Expose Image Filter Object");
} else if(strcmp(select,"help") == 0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("The filter adapts pixel brightness.");
else if(type == oyNAME_DESCRIPTION)
{
static char * help_desc = NULL;
if(!help_desc)
oyStringAddPrintf( &help_desc, 0,0, "%s",
_("The filter expects a \"expose\" double option and will process the data accordingly.")
);
return help_desc;
}
} else if(strcmp(select,"category") == 0)
{
if(type == oyNAME_NICK)
return "category";
else if(type == oyNAME_NAME)
return _("Image/Simple Image[expose]");
else if(type == oyNAME_DESCRIPTION)
return _("The filter is used to reduce pixels.");
}
return 0;
}
/** @brief oyra oyCMMapi4_s implementation
*
* a filter providing an expose image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi4ImageExposeCreate(void)
{
static const char * oyra_api4_ui_image_expose_texts[] = {"name", "help", "category", 0};
oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[expose]", /* category */
oyraApi4UiImageExposeGetText,
oyra_api4_ui_image_expose_texts, 0 );
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
oyCMMapi4_s * expose4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_EXPOSE_REGISTRATION,
cmm_version, module_api,
NULL,
NULL,
NULL,
ui,
NULL );
return (oyCMMapi_s*)expose4;
}
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
ast-dump-openmp-begin-declare-variant_8.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++ | FileCheck %s
// expected-no-diagnostics
#pragma omp begin declare variant match(device={kind(cpu)})
int also_before(void) {
return 1;
}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(0):llvm)})
int also_after(void) {
return 0;
}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(100):llvm)})
int also_before(void) {
return 0;
}
#pragma omp end declare variant
int also_after(void) {
return 2;
}
int test(void) {
// Should return 0.
return also_after() + also_before();
}
// Make sure:
// - we do see the ast nodes for the cpu kind
// - we do see the ast nodes for the llvm vendor
// - we pick the right callees
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)}
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(0): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:16, line:29:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:17:1, line:28:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:17:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
|
dSchCompUdt-cuda.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief This file contains the main loop of pdgstrf which involves
* rank k update of the Schur complement.
* Uses CUDA GPU.
*
* <pre>
* -- Distributed SuperLU routine (version 4.0) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
 * </pre>
*/
#define SCHEDULE_STRATEGY dynamic
#define cublasCheckErrors(fn) \
do { \
cublasStatus_t __err = fn; \
if (__err != CUBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \
(int)(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while(0)
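/* Usage: wrap each cuBLAS call at statement level, e.g.
   cublasCheckErrors( cublasSetStream(handle[i], streams[i]) );
   any status other than CUBLAS_STATUS_SUCCESS aborts the run. */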
int full;
double gemm_timer = 0.0;
double scatter_timer = 0.0;
if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
ldu =0;
full =1;
int cum_nrow;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
nbrow= lsub[1];
if (myrow==krow) nbrow = lsub[1]-lsub[3];
if (nbrow>0) {
int ncol_max = SUPERLU_MIN(buffer_size/nbrow,bigu_size/ldt);
int num_streams_used, /*number of streams that will be used*/
ncpu_blks; /*Number of CPU dgemm blks*/
int jjj, jjj_st,jjj_global;
for (j = jj0; j < nub; ++j) {
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
ncols =0 ; //initialize at 0
jj = iukp;
int temp_ldu=0;
for (; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
++ncols;
}
temp_ldu = SUPERLU_MAX(temp_ldu, segsize);
}
full_u_cols[j] = ncols;
blk_ldu[j] = temp_ldu;
} /* end for j = jj0..nub */
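/* At this point full_u_cols[j] holds the number of nonempty columns of
   U block j and blk_ldu[j] the tallest segment in that block; the single
   thread below turns full_u_cols into a running prefix sum so a contiguous
   range of blocks that fits the GEMM buffers can be selected. */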
jjj = jj0; /* initialization */
// #pragma omp barrier
while ( jjj < nub ) {
jjj_st=jjj;
#ifdef _OPENMP
#pragma omp single
#endif
{
ldu = blk_ldu[jjj_st];
for (j = jjj_st; j < nub ; ++j) {
/* prefix sum */
if (j != jjj_st) full_u_cols[j] += full_u_cols[j-1];
ldu = SUPERLU_MAX(ldu, blk_ldu[j]);
/* break condition */
/* the number of columns that can be processed is limited by buffer size*/
if (full_u_cols[j]+((j+1==nub)?0:full_u_cols[j+1]) > ncol_max) {
break;
}
} /* end for j=jjj_st to nub */
jjj_global = SUPERLU_MIN(nub, j+1); /* Maximum value of jjj will be nub */
// TAU_STATIC_TIMER_START("work_divison");
/* Divide CPU-GPU gemm here */
gemm_division_cpu_gpu(
&num_streams_used, /*number of streams that will be used*/
stream_end_col, /*array holding last column blk for each partition*/
&ncpu_blks, /*Number of CPU gemm blks*/
/*input*/
nbrow, /*number of row in A matrix*/
ldu, /*number of k in dgemm*/
nstreams,
full_u_cols + jjj_st, /*array containing prefix sum of work load*/
jjj_global-jjj_st /*Number of work load */
);
// TAU_STATIC_TIMER_STOP("work_divison");
} /* pragma omp single */
jjj = jjj_global;
// printf("thread_id %d, jjj %d \n",thread_id,jjj );
if (jjj == jjj_st+1 && full_u_cols[jjj_st] > ncol_max) {
printf("allocate more memory for buffer !!!!\n");
if(nbrow * full_u_cols[jjj_st] > buffer_size)
printf("%d buffer_size %d\n",nbrow*full_u_cols[jjj_st],buffer_size );
}
// #pragma omp barrier
/* gathering circuit */
assert(jjj_st<nub);
assert(jjj-1<nub);
// TAU_STATIC_TIMER_START("GATHER_U");
#ifdef _OPENMP
#pragma omp for schedule( SCHEDULE_STRATEGY )
#endif
for (j = jjj_st; j < jjj; ++j) {
if (j==jjj_st) tempu = bigU;
else tempu = bigU + ldu*full_u_cols[j-1];
/* == processing each of the remaining columns == */
arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid);
// tempu = tempU2d;
for (jj = iukp; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
lead_zero = ldu - segsize;
for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
tempu += lead_zero;
for (i = 0; i < segsize; ++i)
tempu[i] = uval[rukp+i];
rukp += segsize;
tempu += segsize;
}
}
rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
} /* end for j=jjj_st to jjj */
if ( num_streams_used > 0 ) {
#ifdef PI_DEBUG
printf("nbrow %d *ldu %d =%d < ldt %d * max_row_size %d =%d \n",nbrow,ldu,nbrow*ldu,ldt,max_row_size,ldt*max_row_size );
assert(nbrow*ldu<=ldt*max_row_size);
#endif
cudaMemcpy2DAsync(dA, nbrow*sizeof(double),
&lusup[luptr+(knsupc-ldu)*nsupr],
nsupr*sizeof(double), nbrow*sizeof(double),
ldu, cudaMemcpyHostToDevice, streams[0]);
}
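/* The L panel dA (nbrow x ldu) uploaded above is shared by all streams;
   each stream then pipelines its own slice: copy its piece of bigU into dB,
   run cublasDgemm on that stream, and copy the resulting slice of dC back
   into bigV asynchronously. */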
for (int i = 0; i < num_streams_used; ++i) {
int st = (i==0) ? ncpu_blks+jjj_st : jjj_st+stream_end_col[i-1];
int st_col = full_u_cols[st-1];
int num_col_stream = full_u_cols[jjj_st+stream_end_col[i]-1]-full_u_cols[st-1];
tempu = bigU;
double *tempv1 = bigV + full_u_cols[st-1]*nbrow;
/* Following is for testing purpose */
#ifdef GPU_ACC
int stream_id = i;
int b_offset = ldu * st_col;
int c_offset = st_col * nbrow;
size_t B_stream_size = ldu * num_col_stream * sizeof(double);
size_t C_stream_size = nbrow * num_col_stream * sizeof(double);
assert(ldu*(st_col+num_col_stream) < bigu_size);
assert(nbrow*(st_col+num_col_stream) < buffer_size);
cudaMemcpyAsync(dB+b_offset, tempu+b_offset, B_stream_size,
cudaMemcpyHostToDevice, streams[stream_id]);
cublasCheckErrors(
cublasSetStream(handle[stream_id],
streams[stream_id])
);
cublasCheckErrors(
cublasDgemm(handle[stream_id],
CUBLAS_OP_N, CUBLAS_OP_N,
nbrow, num_col_stream, ldu,
&alpha, dA, nbrow,
&dB[b_offset], ldu,
&beta, &dC[c_offset],
nbrow)
);
checkCuda( cudaMemcpyAsync(tempv1, dC+c_offset,
C_stream_size,
cudaMemcpyDeviceToHost,
streams[stream_id]) );
#else
if ( num_col_stream > 0 ) {
my_dgemm_("N", "N", &nbrow, &num_col_stream, &ldu,
&alpha, &lusup[luptr+(knsupc-ldu)*nsupr],
&nsupr, tempu+ldu*st_col, &ldu, &beta,
tempv1, &nbrow, 1, 1);
}
#endif
} /* end for i = 1 to num_streams used */
int num_col = full_u_cols[jjj_st+ncpu_blks-1];
int st_col = 0; /*special case for cpu */
tempv = bigV + nbrow * st_col;
tempu = bigU;
double tstart = SuperLU_timer_();
#if defined (USE_VENDOR_BLAS)
dgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha,
&lusup[luptr+(knsupc-ldu)*nsupr], &nsupr,
tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow, 1, 1);
#else
dgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha,
&lusup[luptr+(knsupc-ldu)*nsupr], &nsupr,
tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow);
#endif
gemm_timer += SuperLU_timer_() -tstart;
stat->ops[FACT] += 2 * nbrow * ldu * full_u_cols[jjj-1];
// printf("after dgemm \n");
/* Now scattering blocks handled by cpu */
int temp_ncol;
/* scatter the first blocks which the cpu has computed */
tstart = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel \
private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \
segsize,lead_zero, \
ib, temp_nbrow,ilst,lib,index, \
ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \
nzval, lb , jj, i) \
firstprivate(luptr,lptr) default (shared)
#endif
{
int thread_id = omp_get_thread_num();
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
double* tempv1;
if (ncpu_blks< omp_get_num_threads()) {
// TAU_STATIC_TIMER_START("SPECIAL_CPU_SCATTER");
for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
#ifdef _OPENMP
#pragma omp for schedule( SCHEDULE_STRATEGY ) nowait
#endif
for (lb = 0; lb < nlb; lb++ ) {
int cum_nrow = 0;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
for (int i = 0; i < lb; ++i) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow +=temp_nbrow;
}
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
// TAU_STATIC_TIMER_STOP("SPECIAL_CPU_SCATTER");
} else {
#ifdef _OPENMP
#pragma omp for schedule(SCHEDULE_STRATEGY) nowait
#endif
for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
for (lb = 0; lb < nlb; lb++ ) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
#ifdef DGEMM_STAT
if(j==jjj_st) {
temp_ncol = full_u_cols[j];
} else {
temp_ncol = full_u_cols[j]- full_u_cols[j-1];
}
printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu);
#endif
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
} /* else if (ncpu_blks >= omp_get_num_threads()) */
} /* parallel region */
scatter_timer += SuperLU_timer_() - tstart;
#ifdef _OPENMP
#pragma omp parallel \
private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \
segsize,lead_zero, \
ib, temp_nbrow,ilst,lib,index, \
ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \
nzval, lb , jj, i) \
firstprivate(luptr,lptr) default (shared)
#endif
{
int thread_id = omp_get_thread_num();
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
double* tempv1;
for(i = 0; i < num_streams_used; i++) { /* i is private variable */
checkCuda(cudaStreamSynchronize (streams[i]));
int jjj_st1 = (i==0) ? jjj_st + ncpu_blks : jjj_st + stream_end_col[i-1];
int jjj_end = jjj_st + stream_end_col[i];
assert(jjj_end-1<nub);
assert(jjj_st1>jjj_st) ;
/* now scatter it */
#pragma omp for schedule( SCHEDULE_STRATEGY ) nowait
for (j = jjj_st1; j < jjj_end; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
for (lb = 0; lb < nlb; lb++) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
#ifdef DGEMM_STAT
if(j==jjj_st) {
temp_ncol = full_u_cols[j];
} else {
temp_ncol = full_u_cols[j]- full_u_cols[j-1];
}
printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu);
#endif
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("gpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("gpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
dscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
} /* end for i = 0 to nstreams */
// TAU_STATIC_TIMER_STOP("GPU_SCATTER");
// TAU_STATIC_TIMER_STOP("INSIDE_OMP");
} /* end pragma omp parallel */
// TAU_STATIC_TIMER_STOP("OUTSIDE_OMP");
} /* end while(jjj<nub) */
} /* if nbrow>0 */
} /* end if msg0 && msg2 */
|
ipa-fnsummary.c | /* Function summary pass.
Copyright (C) 2003-2020 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Analysis of function bodies used by inter-procedural passes
We estimate for each function
- function body size and size after specializing into given context
- average function execution time in a given context
- function frame size
For each call
- call statement size, time and how often the parameters change
ipa_fn_summary data structures store above information locally (i.e.
parameters of the function itself) and globally (i.e. parameters of
the function created by applying all the inline decisions already
present in the callgraph).
We provide access to the ipa_fn_summary data structure and
basic logic updating the parameters when inlining is performed.
The summaries are context sensitive. Context means
1) partial assignment of known constant values of operands
2) whether function is inlined into the call or not.
It is easy to add more variants. To represent function size and time
that depends on context (i.e. it is known to be optimized away when
context is known either by inlining or from IP-CP and cloning),
we use predicates.
estimate_edge_size_and_time can be used to query
function size/time in the given context. ipa_merge_fn_summary_after_inlining merges
properties of caller and callee after inlining.
Finally pass_inline_parameters is exported. This is used to drive
computation of function parameters used by the early inliner. IPA
inliner performs analysis via its analyze_function method. */
#include "config.h"
#define INCLUDE_VECTOR
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-into-ssa.h"
/* Summaries. */
fast_function_summary <ipa_fn_summary *, va_gc> *ipa_fn_summaries;
fast_function_summary <ipa_size_summary *, va_heap> *ipa_size_summaries;
fast_call_summary <ipa_call_summary *, va_heap> *ipa_call_summaries;
/* Edge predicates go here. */
static object_allocator<predicate> edge_predicate_pool ("edge predicates");
/* Dump IPA hints. */
void
ipa_dump_hints (FILE *f, ipa_hints hints)
{
if (!hints)
return;
fprintf (f, "IPA hints:");
if (hints & INLINE_HINT_indirect_call)
{
hints &= ~INLINE_HINT_indirect_call;
fprintf (f, " indirect_call");
}
if (hints & INLINE_HINT_loop_iterations)
{
hints &= ~INLINE_HINT_loop_iterations;
fprintf (f, " loop_iterations");
}
if (hints & INLINE_HINT_loop_stride)
{
hints &= ~INLINE_HINT_loop_stride;
fprintf (f, " loop_stride");
}
if (hints & INLINE_HINT_same_scc)
{
hints &= ~INLINE_HINT_same_scc;
fprintf (f, " same_scc");
}
if (hints & INLINE_HINT_in_scc)
{
hints &= ~INLINE_HINT_in_scc;
fprintf (f, " in_scc");
}
if (hints & INLINE_HINT_cross_module)
{
hints &= ~INLINE_HINT_cross_module;
fprintf (f, " cross_module");
}
if (hints & INLINE_HINT_declared_inline)
{
hints &= ~INLINE_HINT_declared_inline;
fprintf (f, " declared_inline");
}
if (hints & INLINE_HINT_known_hot)
{
hints &= ~INLINE_HINT_known_hot;
fprintf (f, " known_hot");
}
gcc_assert (!hints);
}
/* Record SIZE and TIME to SUMMARY.
The accounted code will be executed when EXEC_PRED is true.
When NONCONST_PRED is false the code will evaluate to constant and
will get optimized out in specialized clones of the function.
If CALL is true account to call_size_time_table rather than
size_time_table. */
void
ipa_fn_summary::account_size_time (int size, sreal time,
const predicate &exec_pred,
const predicate &nonconst_pred_in,
bool call)
{
size_time_entry *e;
bool found = false;
int i;
predicate nonconst_pred;
vec<size_time_entry, va_gc> *table = call
? call_size_time_table : size_time_table;
if (exec_pred == false)
return;
nonconst_pred = nonconst_pred_in & exec_pred;
if (nonconst_pred == false)
return;
/* We need to create an initial empty unconditional clause, but otherwise
we don't need to account empty times and sizes. */
if (!size && time == 0 && table)
return;
/* Only for calls do we un-account what we previously recorded. */
gcc_checking_assert (time >= 0 || call);
for (i = 0; vec_safe_iterate (table, i, &e); i++)
if (e->exec_predicate == exec_pred
&& e->nonconst_predicate == nonconst_pred)
{
found = true;
break;
}
if (i == max_size_time_table_size)
{
i = 0;
found = true;
e = &(*table)[0];
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"\t\tReached limit on number of entries, "
"ignoring the predicate.");
}
if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size))
{
fprintf (dump_file,
"\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
((double) size) / ipa_fn_summary::size_scale,
(time.to_double ()), found ? "" : "new ");
exec_pred.dump (dump_file, conds, 0);
if (exec_pred != nonconst_pred)
{
fprintf (dump_file, " nonconst:");
nonconst_pred.dump (dump_file, conds);
}
else
fprintf (dump_file, "\n");
}
if (!found)
{
class size_time_entry new_entry;
new_entry.size = size;
new_entry.time = time;
new_entry.exec_predicate = exec_pred;
new_entry.nonconst_predicate = nonconst_pred;
if (call)
vec_safe_push (call_size_time_table, new_entry);
else
vec_safe_push (size_time_table, new_entry);
}
else
{
e->size += size;
e->time += time;
/* FIXME: PR bootstrap/92653 gcc_checking_assert (e->time >= -1); */
/* Tolerate small roundoff issues. */
if (e->time < 0)
e->time = 0;
}
}
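/* An example of account_size_time above: recording size
   2 * ipa_fn_summary::size_scale and time 2 under the always-true predicate
   pair creates (or grows) the unconditional entry; recording the same
   predicate pair again takes the "found" path and merely adds to that entry
   rather than pushing a new one.  */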
/* We proved E to be unreachable, redirect it to __builtin_unreachable. */
static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
{
struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
struct cgraph_node *target = cgraph_node::get_create
(builtin_decl_implicit (BUILT_IN_UNREACHABLE));
if (e->speculative)
e = cgraph_edge::resolve_speculation (e, target->decl);
else if (!e->callee)
e = cgraph_edge::make_direct (e, target);
else
e->redirect_callee (target);
class ipa_call_summary *es = ipa_call_summaries->get (e);
e->inline_failed = CIF_UNREACHABLE;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
es->call_stmt_time = 0;
if (callee)
callee->remove_symbol_and_inline_clones ();
return e;
}
/* Set predicate for edge E. */
static void
edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
{
/* If the edge is determined to be never executed, redirect it
to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will
be optimized out. */
if (predicate && *predicate == false
/* When handling speculative edges, we need to do the redirection
just once. Do it always on the direct edge, so we do not
attempt to resolve speculation while duplicating the edge. */
&& (!e->speculative || e->callee))
e = redirect_to_unreachable (e);
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (predicate && *predicate != true)
{
if (!es->predicate)
es->predicate = edge_predicate_pool.allocate ();
*es->predicate = *predicate;
}
else
{
if (es->predicate)
edge_predicate_pool.remove (es->predicate);
es->predicate = NULL;
}
}
/* Set predicate for hint *P. */
static void
set_hint_predicate (predicate **p, predicate new_predicate)
{
if (new_predicate == false || new_predicate == true)
{
if (*p)
edge_predicate_pool.remove (*p);
*p = NULL;
}
else
{
if (!*p)
*p = edge_predicate_pool.allocate ();
**p = new_predicate;
}
}
/* Find if NEW_PREDICATE is already in V and if so, increment its freq.
Otherwise add a new item to the vector with this predicate and freq equal
to add_freq, unless the number of predicates would exceed MAX_NUM_PREDICATES
in which case the function does nothing. */
static void
add_freqcounting_predicate (vec<ipa_freqcounting_predicate, va_gc> **v,
const predicate &new_predicate, sreal add_freq,
unsigned max_num_predicates)
{
if (new_predicate == false || new_predicate == true)
return;
ipa_freqcounting_predicate *f;
for (int i = 0; vec_safe_iterate (*v, i, &f); i++)
if (new_predicate == f->predicate)
{
f->freq += add_freq;
return;
}
if (vec_safe_length (*v) >= max_num_predicates)
/* Too many different predicates to account for. */
return;
ipa_freqcounting_predicate fcp;
fcp.predicate = NULL;
set_hint_predicate (&fcp.predicate, new_predicate);
fcp.freq = add_freq;
vec_safe_push (*v, fcp);
return;
}
/* Compute what conditions may or may not hold given information about
parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
while RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
copy when called in a given context. It is a bitmask of conditions. Bit
0 means that condition is known to be false, while bit 1 means that condition
may or may not be true.  These differ: for example, the NOT_INLINED
condition is always false in the second, and builtin_constant_p tests
cannot use the fact that a parameter is indeed a constant.
When INLINE_P is true, assume that we are inlining.  AVALS contains known
information about argument values. The function does not modify its content
and so AVALs could also be of type ipa_call_arg_values but so far all
callers work with the auto version and so we avoid the conversion for
convenience.
ERROR_MARK value of an argument means compile time invariant. */
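/* An example of the encoding used below: with INLINE_P false the bit
   predicate::not_inlined_condition is set in the clause, and for every
   condition I that cannot be proven false the bit
   (I + predicate::first_dynamic_condition) is set in both the clause and
   the nonspec clause; a condition disproved by a known constant argument
   simply leaves its bit clear.  */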
static void
evaluate_conditions_for_known_args (struct cgraph_node *node,
bool inline_p,
ipa_auto_call_arg_values *avals,
clause_t *ret_clause,
clause_t *ret_nonspec_clause)
{
clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
class ipa_fn_summary *info = ipa_fn_summaries->get (node);
int i;
struct condition *c;
for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
tree val = NULL;
tree res;
int j;
struct expr_eval_op *op;
/* We allow call stmt to have fewer arguments than the callee function
(especially for K&R style programs). So bound check here (we assume
m_known_aggs vector is either empty or has the same length as
m_known_vals). */
gcc_checking_assert (!avals->m_known_aggs.length ()
|| !avals->m_known_vals.length ()
|| (avals->m_known_vals.length ()
== avals->m_known_aggs.length ()));
if (c->agg_contents)
{
if (c->code == predicate::changed
&& !c->by_ref
&& (avals->safe_sval_at(c->operand_num) == error_mark_node))
continue;
if (ipa_agg_value_set *agg = avals->safe_aggval_at (c->operand_num))
{
tree sval = avals->safe_sval_at (c->operand_num);
val = ipa_find_agg_cst_for_param (agg, sval, c->offset,
c->by_ref);
}
else
val = NULL_TREE;
}
else
{
val = avals->safe_sval_at (c->operand_num);
if (val && val == error_mark_node && c->code != predicate::changed)
val = NULL_TREE;
}
if (!val
&& (c->code == predicate::changed
|| c->code == predicate::is_not_constant))
{
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (c->code == predicate::changed)
{
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (c->code == predicate::is_not_constant)
{
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (val && TYPE_SIZE (c->type) == TYPE_SIZE (TREE_TYPE (val)))
{
if (c->type != TREE_TYPE (val))
val = fold_unary (VIEW_CONVERT_EXPR, c->type, val);
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
if (!val)
break;
if (!op->val[0])
val = fold_unary (op->code, op->type, val);
else if (!op->val[1])
val = fold_binary (op->code, op->type,
op->index ? op->val[0] : val,
op->index ? val : op->val[0]);
else if (op->index == 0)
val = fold_ternary (op->code, op->type,
val, op->val[0], op->val[1]);
else if (op->index == 1)
val = fold_ternary (op->code, op->type,
op->val[0], val, op->val[1]);
else if (op->index == 2)
val = fold_ternary (op->code, op->type,
op->val[0], op->val[1], val);
else
val = NULL_TREE;
}
res = val
? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
: NULL;
if (res && integer_zerop (res))
continue;
if (res && integer_onep (res))
{
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
}
if (c->operand_num < (int) avals->m_known_value_ranges.length ()
&& !c->agg_contents
&& (!val || TREE_CODE (val) != INTEGER_CST))
{
value_range vr = avals->m_known_value_ranges[c->operand_num];
if (!vr.undefined_p ()
&& !vr.varying_p ()
&& (TYPE_SIZE (c->type) == TYPE_SIZE (vr.type ())))
{
if (!useless_type_conversion_p (c->type, vr.type ()))
{
value_range res;
range_fold_unary_expr (&res, NOP_EXPR,
c->type, &vr, vr.type ());
vr = res;
}
tree type = c->type;
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
if (vr.varying_p () || vr.undefined_p ())
break;
value_range res;
if (!op->val[0])
range_fold_unary_expr (&res, op->code, op->type, &vr, type);
else if (!op->val[1])
{
value_range op0 (op->val[0], op->val[0]);
range_fold_binary_expr (&res, op->code, op->type,
op->index ? &op0 : &vr,
op->index ? &vr : &op0);
}
else
gcc_unreachable ();
type = op->type;
vr = res;
}
if (!vr.varying_p () && !vr.undefined_p ())
{
value_range res;
value_range val_vr (c->val, c->val);
range_fold_binary_expr (&res, c->code, boolean_type_node,
&vr,
&val_vr);
if (res.zero_p ())
continue;
}
}
}
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
}
*ret_clause = clause;
if (ret_nonspec_clause)
*ret_nonspec_clause = nonspec_clause;
}
/* Return true if VRP will be executed on the function.
We do not want to anticipate optimizations that will not happen.
FIXME: This can be confused with -fdisable and debug counters and thus
it should not be used for correctness (only to make heuristics work).
This means that the inliner should do its own optimization of expressions
that it predicts to be constant, so that wrong code cannot be triggered by
builtin_constant_p. */
static bool
vrp_will_run_p (struct cgraph_node *node)
{
return (opt_for_fn (node->decl, optimize)
&& !opt_for_fn (node->decl, optimize_debug)
&& opt_for_fn (node->decl, flag_tree_vrp));
}
/* Similarly about FRE. */
static bool
fre_will_run_p (struct cgraph_node *node)
{
return (opt_for_fn (node->decl, optimize)
&& !opt_for_fn (node->decl, optimize_debug)
&& opt_for_fn (node->decl, flag_tree_fre));
}
/* Work out what conditions might be true at invocation of E.
Compute costs for inlined edge if INLINE_P is true.
Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
(if non-NULL) conditions evaluated for nonspecialized clone called
in a given context.
Vectors in AVALS will be populated with useful known information about
argument values - information not known to have any uses will be omitted -
except for m_known_contexts which will only be calculated if
COMPUTE_CONTEXTS is true. */
void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
clause_t *clause_ptr,
clause_t *nonspec_clause_ptr,
ipa_auto_call_arg_values *avals,
bool compute_contexts)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
class ipa_fn_summary *info = ipa_fn_summaries->get (callee);
class ipa_edge_args *args;
if (clause_ptr)
*clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
if (ipa_node_params_sum
&& !e->call_stmt_cannot_inline_p
&& (info->conds || compute_contexts)
&& (args = IPA_EDGE_REF (e)) != NULL)
{
struct cgraph_node *caller;
class ipa_node_params *caller_parms_info, *callee_pi = NULL;
class ipa_call_summary *es = ipa_call_summaries->get (e);
int i, count = ipa_get_cs_argument_count (args);
if (count)
{
if (e->caller->inlined_to)
caller = e->caller->inlined_to;
else
caller = e->caller;
caller_parms_info = IPA_NODE_REF (caller);
callee_pi = IPA_NODE_REF (callee);
/* Watch for thunks. */
if (callee_pi)
/* Watch for variadic functions. */
count = MIN (count, ipa_get_param_count (callee_pi));
}
if (callee_pi)
for (i = 0; i < count; i++)
{
struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
if (ipa_is_param_used_by_indirect_call (callee_pi, i)
|| ipa_is_param_used_by_ipa_predicates (callee_pi, i))
{
/* Determine if we know constant value of the parameter. */
tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
ipa_get_type (callee_pi, i));
if (!cst && e->call_stmt
&& i < (int)gimple_call_num_args (e->call_stmt))
{
cst = gimple_call_arg (e->call_stmt, i);
if (!is_gimple_min_invariant (cst))
cst = NULL;
}
if (cst)
{
gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
if (!avals->m_known_vals.length ())
avals->m_known_vals.safe_grow_cleared (count, true);
avals->m_known_vals[i] = cst;
}
else if (inline_p && !es->param[i].change_prob)
{
if (!avals->m_known_vals.length ())
avals->m_known_vals.safe_grow_cleared (count, true);
avals->m_known_vals[i] = error_mark_node;
}
          /* If we failed to get a simple constant, try the value range.  */
if ((!cst || TREE_CODE (cst) != INTEGER_CST)
&& vrp_will_run_p (caller)
&& ipa_is_param_used_by_ipa_predicates (callee_pi, i))
{
value_range vr
= ipa_value_range_from_jfunc (caller_parms_info, e, jf,
ipa_get_type (callee_pi,
i));
if (!vr.undefined_p () && !vr.varying_p ())
{
if (!avals->m_known_value_ranges.length ())
{
avals->m_known_value_ranges.safe_grow (count, true);
for (int i = 0; i < count; ++i)
new (&avals->m_known_value_ranges[i])
value_range ();
}
avals->m_known_value_ranges[i] = vr;
}
}
/* Determine known aggregate values. */
if (fre_will_run_p (caller))
{
ipa_agg_value_set agg
= ipa_agg_value_set_from_jfunc (caller_parms_info,
caller, &jf->agg);
if (agg.items.length ())
{
if (!avals->m_known_aggs.length ())
avals->m_known_aggs.safe_grow_cleared (count, true);
avals->m_known_aggs[i] = agg;
}
}
}
          /* For parameters used in polymorphic calls we further determine
             the polymorphic call context.  */
if (compute_contexts
&& ipa_is_param_used_by_polymorphic_call (callee_pi, i))
{
ipa_polymorphic_call_context
ctx = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
if (!ctx.useless_p ())
{
if (!avals->m_known_contexts.length ())
avals->m_known_contexts.safe_grow_cleared (count, true);
avals->m_known_contexts[i]
= ipa_context_from_jfunc (caller_parms_info, e, i, jf);
}
}
}
else
gcc_assert (!count || callee->thunk.thunk_p);
}
else if (e->call_stmt && !e->call_stmt_cannot_inline_p && info->conds)
{
int i, count = (int)gimple_call_num_args (e->call_stmt);
for (i = 0; i < count; i++)
{
tree cst = gimple_call_arg (e->call_stmt, i);
if (!is_gimple_min_invariant (cst))
cst = NULL;
if (cst)
{
if (!avals->m_known_vals.length ())
avals->m_known_vals.safe_grow_cleared (count, true);
avals->m_known_vals[i] = cst;
}
}
}
evaluate_conditions_for_known_args (callee, inline_p, avals, clause_ptr,
nonspec_clause_ptr);
}
/* Allocate the function summary. */
static void
ipa_fn_summary_alloc (void)
{
gcc_checking_assert (!ipa_fn_summaries);
ipa_size_summaries = new ipa_size_summary_t (symtab);
ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
ipa_call_summaries = new ipa_call_summary_t (symtab);
}
ipa_call_summary::~ipa_call_summary ()
{
if (predicate)
edge_predicate_pool.remove (predicate);
param.release ();
}
ipa_fn_summary::~ipa_fn_summary ()
{
unsigned len = vec_safe_length (loop_iterations);
for (unsigned i = 0; i < len; i++)
edge_predicate_pool.remove ((*loop_iterations)[i].predicate);
len = vec_safe_length (loop_strides);
for (unsigned i = 0; i < len; i++)
edge_predicate_pool.remove ((*loop_strides)[i].predicate);
vec_free (conds);
vec_free (size_time_table);
vec_free (call_size_time_table);
vec_free (loop_iterations);
vec_free (loop_strides);
}
void
ipa_fn_summary_t::remove_callees (cgraph_node *node)
{
cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
ipa_call_summaries->remove (e);
for (e = node->indirect_calls; e; e = e->next_callee)
ipa_call_summaries->remove (e);
}
/* Duplicate predicates in the loop hint vector V, allocating memory for them,
   and remove and deallocate any uninteresting (true or false) ones.  Return
   the result.  */
static vec<ipa_freqcounting_predicate, va_gc> *
remap_freqcounting_preds_after_dup (vec<ipa_freqcounting_predicate, va_gc> *v,
clause_t possible_truths)
{
if (vec_safe_length (v) == 0)
return NULL;
vec<ipa_freqcounting_predicate, va_gc> *res = v->copy ();
  int len = res->length ();
for (int i = len - 1; i >= 0; i--)
{
predicate new_predicate
= (*res)[i].predicate->remap_after_duplication (possible_truths);
      /* We do not want to free the previous predicate; it is used by the
	 node's origin.  */
(*res)[i].predicate = NULL;
set_hint_predicate (&(*res)[i].predicate, new_predicate);
if (!(*res)[i].predicate)
res->unordered_remove (i);
}
return res;
}
/* Hook that is called by cgraph.c when a node is duplicated. */
void
ipa_fn_summary_t::duplicate (cgraph_node *src,
cgraph_node *dst,
ipa_fn_summary *,
ipa_fn_summary *info)
{
new (info) ipa_fn_summary (*ipa_fn_summaries->get (src));
/* TODO: as an optimization, we may avoid copying conditions
that are known to be false or true. */
info->conds = vec_safe_copy (info->conds);
/* When there are any replacements in the function body, see if we can figure
out that something was optimized out. */
if (ipa_node_params_sum && dst->clone.tree_map)
{
vec<size_time_entry, va_gc> *entry = info->size_time_table;
/* Use SRC parm info since it may not be copied yet. */
class ipa_node_params *parms_info = IPA_NODE_REF (src);
ipa_auto_call_arg_values avals;
int count = ipa_get_param_count (parms_info);
int i, j;
clause_t possible_truths;
predicate true_pred = true;
size_time_entry *e;
int optimized_out_size = 0;
bool inlined_to_p = false;
struct cgraph_edge *edge, *next;
info->size_time_table = 0;
avals.m_known_vals.safe_grow_cleared (count, true);
for (i = 0; i < count; i++)
{
struct ipa_replace_map *r;
for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
{
if (r->parm_num == i)
{
avals.m_known_vals[i] = r->new_tree;
break;
}
}
}
evaluate_conditions_for_known_args (dst, false,
&avals,
&possible_truths,
/* We are going to specialize,
so ignore nonspec truths. */
NULL);
info->account_size_time (0, 0, true_pred, true_pred);
      /* Remap size_time vectors.
	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as an optimization, we can also eliminate conditions known
	 to be true.  */
for (i = 0; vec_safe_iterate (entry, i, &e); i++)
{
predicate new_exec_pred;
predicate new_nonconst_pred;
new_exec_pred = e->exec_predicate.remap_after_duplication
(possible_truths);
new_nonconst_pred = e->nonconst_predicate.remap_after_duplication
(possible_truths);
if (new_exec_pred == false || new_nonconst_pred == false)
optimized_out_size += e->size;
else
info->account_size_time (e->size, e->time, new_exec_pred,
new_nonconst_pred);
}
/* Remap edge predicates with the same simplification as above.
Also copy constantness arrays. */
for (edge = dst->callees; edge; edge = next)
{
predicate new_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
next = edge->next_callee;
if (!edge->inline_failed)
inlined_to_p = true;
if (!es->predicate)
continue;
new_predicate = es->predicate->remap_after_duplication
(possible_truths);
if (new_predicate == false && *es->predicate != false)
optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
edge_set_predicate (edge, &new_predicate);
}
/* Remap indirect edge predicates with the same simplification as above.
Also copy constantness arrays. */
for (edge = dst->indirect_calls; edge; edge = next)
{
predicate new_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
next = edge->next_callee;
gcc_checking_assert (edge->inline_failed);
if (!es->predicate)
continue;
new_predicate = es->predicate->remap_after_duplication
(possible_truths);
if (new_predicate == false && *es->predicate != false)
optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
edge_set_predicate (edge, &new_predicate);
}
info->loop_iterations
= remap_freqcounting_preds_after_dup (info->loop_iterations,
possible_truths);
info->loop_strides
= remap_freqcounting_preds_after_dup (info->loop_strides,
possible_truths);
      /* If the inliner or someone after the inliner ever starts producing
	 non-trivial clones, we will get in trouble with the lack of
	 information about updating self sizes, because the size vectors
	 already contain the sizes of the callees.  */
gcc_assert (!inlined_to_p || !optimized_out_size);
}
else
{
info->size_time_table = vec_safe_copy (info->size_time_table);
info->loop_iterations = vec_safe_copy (info->loop_iterations);
info->loop_strides = vec_safe_copy (info->loop_strides);
ipa_freqcounting_predicate *f;
for (int i = 0; vec_safe_iterate (info->loop_iterations, i, &f); i++)
{
predicate p = *f->predicate;
f->predicate = NULL;
set_hint_predicate (&f->predicate, p);
}
for (int i = 0; vec_safe_iterate (info->loop_strides, i, &f); i++)
{
predicate p = *f->predicate;
f->predicate = NULL;
set_hint_predicate (&f->predicate, p);
}
}
if (!dst->inlined_to)
ipa_update_overall_fn_summary (dst);
}
/* Hook that is called by cgraph.c when an edge is duplicated.  */
void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
struct cgraph_edge *dst,
class ipa_call_summary *srcinfo,
class ipa_call_summary *info)
{
new (info) ipa_call_summary (*srcinfo);
info->predicate = NULL;
edge_set_predicate (dst, srcinfo->predicate);
info->param = srcinfo->param.copy ();
if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
{
info->call_stmt_size -= (eni_size_weights.indirect_call_cost
- eni_size_weights.call_cost);
info->call_stmt_time -= (eni_time_weights.indirect_call_cost
- eni_time_weights.call_cost);
}
}
/* Dump edge summaries associated with NODE and recursively with all clones.
   Indent by INDENT.  */
static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
class ipa_fn_summary *info)
{
struct cgraph_edge *edge;
for (edge = node->callees; edge; edge = edge->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (edge);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
int i;
fprintf (f,
"%*s%s %s\n%*s freq:%4.2f",
indent, "", callee->dump_name (),
!edge->inline_failed
? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
indent, "", edge->sreal_frequency ().to_double ());
if (cross_module_call_p (edge))
fprintf (f, " cross module");
if (es)
fprintf (f, " loop depth:%2i size:%2i time: %2i",
es->loop_depth, es->call_stmt_size, es->call_stmt_time);
ipa_fn_summary *s = ipa_fn_summaries->get (callee);
ipa_size_summary *ss = ipa_size_summaries->get (callee);
if (s != NULL)
fprintf (f, " callee size:%2i stack:%2i",
(int) (ss->size / ipa_fn_summary::size_scale),
(int) s->estimated_stack_size);
if (es && es->predicate)
{
fprintf (f, " predicate: ");
es->predicate->dump (f, info->conds);
}
else
fprintf (f, "\n");
if (es && es->param.exists ())
for (i = 0; i < (int) es->param.length (); i++)
{
int prob = es->param[i].change_prob;
if (!prob)
fprintf (f, "%*s op%i is compile time invariant\n",
indent + 2, "", i);
else if (prob != REG_BR_PROB_BASE)
fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
prob * 100.0 / REG_BR_PROB_BASE);
if (es->param[i].points_to_local_or_readonly_memory)
fprintf (f, "%*s op%i points to local or readonly memory\n",
indent + 2, "", i);
}
if (!edge->inline_failed)
{
ipa_size_summary *ss = ipa_size_summaries->get (callee);
fprintf (f, "%*sStack frame offset %i, callee self size %i\n",
indent + 2, "",
(int) ipa_get_stack_frame_offset (callee),
(int) ss->estimated_self_stack_size);
dump_ipa_call_summary (f, indent + 2, callee, info);
}
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (edge);
fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
es->loop_depth,
edge->sreal_frequency ().to_double (), es->call_stmt_size,
es->call_stmt_time);
if (es->predicate)
{
fprintf (f, "predicate: ");
es->predicate->dump (f, info->conds);
}
else
fprintf (f, "\n");
}
}
void
ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
if (node->definition)
{
class ipa_fn_summary *s = ipa_fn_summaries->get (node);
class ipa_size_summary *ss = ipa_size_summaries->get (node);
if (s != NULL)
{
size_time_entry *e;
int i;
fprintf (f, "IPA function summary for %s", node->dump_name ());
if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
fprintf (f, " always_inline");
if (s->inlinable)
fprintf (f, " inlinable");
if (s->fp_expressions)
fprintf (f, " fp_expression");
fprintf (f, "\n global time: %f\n", s->time.to_double ());
fprintf (f, " self size: %i\n", ss->self_size);
fprintf (f, " global size: %i\n", ss->size);
fprintf (f, " min size: %i\n", s->min_size);
fprintf (f, " self stack: %i\n",
(int) ss->estimated_self_stack_size);
fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
if (s->growth)
fprintf (f, " estimated growth:%i\n", (int) s->growth);
if (s->scc_no)
fprintf (f, " In SCC: %i\n", (int) s->scc_no);
for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
{
fprintf (f, " size:%f, time:%f",
(double) e->size / ipa_fn_summary::size_scale,
e->time.to_double ());
if (e->exec_predicate != true)
{
fprintf (f, ", executed if:");
e->exec_predicate.dump (f, s->conds, 0);
}
if (e->exec_predicate != e->nonconst_predicate)
{
fprintf (f, ", nonconst if:");
e->nonconst_predicate.dump (f, s->conds, 0);
}
fprintf (f, "\n");
}
ipa_freqcounting_predicate *fcp;
bool first_fcp = true;
for (int i = 0; vec_safe_iterate (s->loop_iterations, i, &fcp); i++)
{
if (first_fcp)
{
fprintf (f, " loop iterations:");
first_fcp = false;
}
fprintf (f, " %3.2f for ", fcp->freq.to_double ());
fcp->predicate->dump (f, s->conds);
}
first_fcp = true;
for (int i = 0; vec_safe_iterate (s->loop_strides, i, &fcp); i++)
{
if (first_fcp)
{
fprintf (f, " loop strides:");
first_fcp = false;
}
fprintf (f, " %3.2f for :", fcp->freq.to_double ());
fcp->predicate->dump (f, s->conds);
}
fprintf (f, " calls:\n");
dump_ipa_call_summary (f, 4, node, s);
fprintf (f, "\n");
}
else
fprintf (f, "IPA summary for %s is missing.\n", node->dump_name ());
}
}
DEBUG_FUNCTION void
ipa_debug_fn_summary (struct cgraph_node *node)
{
ipa_dump_fn_summary (stderr, node);
}
void
ipa_dump_fn_summaries (FILE *f)
{
struct cgraph_node *node;
FOR_EACH_DEFINED_FUNCTION (node)
if (!node->inlined_to)
ipa_dump_fn_summary (f, node);
}
/* Callback of walk_aliased_vdefs.  Signals that it has been invoked by
   setting the boolean variable pointed to by DATA.  */
static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
void *data)
{
bool *b = (bool *) data;
*b = true;
return true;
}
/* If OP refers to the value of a function parameter, return the
   corresponding parameter.  If SIZE_P is non-NULL, the size of the memory
   load (or of the SSA_NAME of the PARM_DECL) will also be stored to *SIZE_P
   in that case.  */
static tree
unmodified_parm_1 (ipa_func_body_info *fbi, gimple *stmt, tree op,
poly_int64 *size_p)
{
/* SSA_NAME referring to parm default def? */
if (TREE_CODE (op) == SSA_NAME
&& SSA_NAME_IS_DEFAULT_DEF (op)
&& TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
{
if (size_p)
*size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
return SSA_NAME_VAR (op);
}
/* Non-SSA parm reference? */
if (TREE_CODE (op) == PARM_DECL)
{
bool modified = false;
ao_ref refd;
ao_ref_init (&refd, op);
int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt),
mark_modified, &modified, NULL, NULL,
fbi->aa_walk_budget + 1);
if (walked < 0)
{
fbi->aa_walk_budget = 0;
return NULL_TREE;
}
if (!modified)
{
if (size_p)
*size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
return op;
}
}
return NULL_TREE;
}
/* If OP refers to the value of a function parameter, return the
   corresponding parameter.  Also traverse chains of SSA register
   assignments.  If SIZE_P is non-NULL, the size of the memory load (or of
   the SSA_NAME of the PARM_DECL) will also be stored to *SIZE_P in that
   case.  */
static tree
unmodified_parm (ipa_func_body_info *fbi, gimple *stmt, tree op,
poly_int64 *size_p)
{
tree res = unmodified_parm_1 (fbi, stmt, op, size_p);
if (res)
return res;
if (TREE_CODE (op) == SSA_NAME
&& !SSA_NAME_IS_DEFAULT_DEF (op)
&& gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
return unmodified_parm (fbi, SSA_NAME_DEF_STMT (op),
gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
size_p);
return NULL_TREE;
}
/* If OP refers to the value of a function parameter or to a value loaded
   from an aggregate passed to a parameter (either by value or by reference),
   return TRUE and store the number of the parameter to *INDEX_P, the access
   size into *SIZE_P, and information about whether and how it has been
   loaded from an aggregate into *AGGPOS.  INFO describes the function
   parameters, STMT is the statement in which OP is used or loaded.  */
static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
gimple *stmt, tree op, int *index_p,
poly_int64 *size_p,
struct agg_position_info *aggpos)
{
tree res = unmodified_parm_1 (fbi, stmt, op, size_p);
gcc_checking_assert (aggpos);
if (res)
{
*index_p = ipa_get_param_decl_index (fbi->info, res);
if (*index_p < 0)
return false;
aggpos->agg_contents = false;
aggpos->by_ref = false;
return true;
}
if (TREE_CODE (op) == SSA_NAME)
{
if (SSA_NAME_IS_DEFAULT_DEF (op)
|| !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
return false;
stmt = SSA_NAME_DEF_STMT (op);
op = gimple_assign_rhs1 (stmt);
if (!REFERENCE_CLASS_P (op))
return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
aggpos);
}
aggpos->agg_contents = true;
return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
stmt, op, index_p, &aggpos->offset,
size_p, &aggpos->by_ref);
}
/* See if a statement might disappear after inlining.
   0 - means it is not eliminated
   1 - half of the executions are expected to go away
   2 - it is eliminated for sure.
   We are not terribly sophisticated, basically looking for simple abstraction
   penalty wrappers.  */
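/* For instance, in a hypothetical abstraction-penalty wrapper such as

     static int get_x (struct point *p) { return p->x; }

   the load reads from a parameter passed by reference and its result is
   copied into a gimple register, so this heuristic predicts that the
   statement will likely be optimized away after inlining.  */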
static int
eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
{
enum gimple_code code = gimple_code (stmt);
enum tree_code rhs_code;
if (!optimize)
return 0;
switch (code)
{
case GIMPLE_RETURN:
return 2;
case GIMPLE_ASSIGN:
if (gimple_num_ops (stmt) != 2)
return 0;
rhs_code = gimple_assign_rhs_code (stmt);
      /* Casts of parameters, loads from parameters passed by reference
	 and stores to the return value or to parameters are often free after
	 inlining due to SRA and further combining.
	 Assume that half of such statements go away.  */
if (CONVERT_EXPR_CODE_P (rhs_code)
|| rhs_code == VIEW_CONVERT_EXPR
|| rhs_code == ADDR_EXPR
|| gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
{
tree rhs = gimple_assign_rhs1 (stmt);
tree lhs = gimple_assign_lhs (stmt);
tree inner_rhs = get_base_address (rhs);
tree inner_lhs = get_base_address (lhs);
bool rhs_free = false;
bool lhs_free = false;
if (!inner_rhs)
inner_rhs = rhs;
if (!inner_lhs)
inner_lhs = lhs;
	  /* Reads of a parameter are expected to be free.  */
if (unmodified_parm (fbi, stmt, inner_rhs, NULL))
rhs_free = true;
	  /* Match expressions of the form &this->field.  Those will most
	     likely combine with something upstream after inlining.  */
else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
{
tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
if (TREE_CODE (op) == PARM_DECL)
rhs_free = true;
else if (TREE_CODE (op) == MEM_REF
&& unmodified_parm (fbi, stmt, TREE_OPERAND (op, 0),
NULL))
rhs_free = true;
}
	  /* When a parameter is not an SSA register because its address is
	     taken and it is just copied into one, the statement will be
	     completely free after inlining (we will copy propagate
	     backward).  */
if (rhs_free && is_gimple_reg (lhs))
return 2;
	  /* Reads of parameters passed by reference are
	     expected to be free (i.e. optimized out after inlining).  */
if (TREE_CODE (inner_rhs) == MEM_REF
&& unmodified_parm (fbi, stmt, TREE_OPERAND (inner_rhs, 0), NULL))
rhs_free = true;
	  /* Copying a parameter passed by reference into a gimple register
	     is probably also going to copy propagate, but we can't be quite
	     sure.  */
if (rhs_free && is_gimple_reg (lhs))
lhs_free = true;
/* Writes to parameters, parameters passed by value and return value
(either directly or passed via invisible reference) are free.
TODO: We ought to handle testcase like
struct a {int a,b;};
struct a
returnstruct (void)
{
struct a a ={1,2};
return a;
}
	     This translates into:
returnstruct ()
{
int a$b;
int a$a;
struct a a;
struct a D.2739;
<bb 2>:
D.2739.a = 1;
D.2739.b = 2;
return D.2739;
}
	     For that we need to copy the ipa-split logic detecting writes
	     to the return value.  */
if (TREE_CODE (inner_lhs) == PARM_DECL
|| TREE_CODE (inner_lhs) == RESULT_DECL
|| (TREE_CODE (inner_lhs) == MEM_REF
&& (unmodified_parm (fbi, stmt, TREE_OPERAND (inner_lhs, 0),
NULL)
|| (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
&& SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
&& TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
(inner_lhs,
0))) == RESULT_DECL))))
lhs_free = true;
if (lhs_free
&& (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
rhs_free = true;
if (lhs_free && rhs_free)
return 1;
}
return 0;
default:
return 0;
}
}
/* Analyze EXPR if it represents a series of simple operations performed on
   a function parameter and return true if so.  FBI, STMT, EXPR, INDEX_P and
   AGGPOS have the same meaning as in unmodified_parm_or_parm_agg_item.
   The type of the parameter, or of the load from an aggregate via the
   parameter, is stored in *TYPE_P.  Operations on the parameter are recorded
   to PARAM_OPS_P if it is not NULL.  */
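/* An illustrative (made-up) example: for a parameter A and the GIMPLE
   sequence

     _1 = A & 255;
     _2 = _1 + 2;

   calling decompose_param_expr on _2 returns true, sets *INDEX_P to the
   index of A, and records the BIT_AND_EXPR and PLUS_EXPR steps in
   *PARAM_OPS_P, so that a condition such as "_2 == 6" can later be
   evaluated for any known value of A.  */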
static bool
decompose_param_expr (struct ipa_func_body_info *fbi,
gimple *stmt, tree expr,
int *index_p, tree *type_p,
struct agg_position_info *aggpos,
expr_eval_ops *param_ops_p = NULL)
{
int op_limit = opt_for_fn (fbi->node->decl, param_ipa_max_param_expr_ops);
int op_count = 0;
if (param_ops_p)
*param_ops_p = NULL;
while (true)
{
expr_eval_op eval_op;
unsigned rhs_count;
unsigned cst_count = 0;
if (unmodified_parm_or_parm_agg_item (fbi, stmt, expr, index_p, NULL,
aggpos))
{
tree type = TREE_TYPE (expr);
if (aggpos->agg_contents)
{
	      /* Stop if the access involves a bit-field.  */
if (TREE_CODE (expr) == BIT_FIELD_REF
|| contains_bitfld_component_ref_p (expr))
break;
}
*type_p = type;
return true;
}
if (TREE_CODE (expr) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (expr))
break;
if (!is_gimple_assign (stmt = SSA_NAME_DEF_STMT (expr)))
break;
switch (gimple_assign_rhs_class (stmt))
{
case GIMPLE_SINGLE_RHS:
expr = gimple_assign_rhs1 (stmt);
continue;
case GIMPLE_UNARY_RHS:
rhs_count = 1;
break;
case GIMPLE_BINARY_RHS:
rhs_count = 2;
break;
case GIMPLE_TERNARY_RHS:
rhs_count = 3;
break;
default:
goto fail;
}
/* Stop if expression is too complex. */
if (op_count++ == op_limit)
break;
if (param_ops_p)
{
eval_op.code = gimple_assign_rhs_code (stmt);
eval_op.type = TREE_TYPE (gimple_assign_lhs (stmt));
eval_op.val[0] = NULL_TREE;
eval_op.val[1] = NULL_TREE;
}
expr = NULL_TREE;
for (unsigned i = 0; i < rhs_count; i++)
{
tree op = gimple_op (stmt, i + 1);
gcc_assert (op && !TYPE_P (op));
if (is_gimple_ip_invariant (op))
{
if (++cst_count == rhs_count)
goto fail;
eval_op.val[cst_count - 1] = op;
}
else if (!expr)
{
	      /* Found a non-constant operand; record its index among the
		 rhs operands.  */
eval_op.index = i;
expr = op;
}
else
{
	      /* Found more than one non-constant operand.  */
goto fail;
}
}
if (param_ops_p)
vec_safe_insert (*param_ops_p, 0, eval_op);
}
  /* Failed to decompose; free resources and return.  */
fail:
if (param_ops_p)
vec_free (*param_ops_p);
return false;
}
/* If BB ends with a conditional that we can turn into predicates, attach
   the corresponding predicates to the CFG edges.  */
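/* A minimal made-up example: for

     if (param_1 > 4)

   the true edge gets the predicate "param_1 > 4" and the false edge its
   inversion "param_1 <= 4", provided the condition decomposes to a tracked
   parameter; edges whose destination post-dominates BB get no predicate.  */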
static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb)
{
gimple *last;
tree op, op2;
int index;
struct agg_position_info aggpos;
enum tree_code code, inverted_code;
edge e;
edge_iterator ei;
gimple *set_stmt;
tree param_type;
expr_eval_ops param_ops;
last = last_stmt (bb);
if (!last || gimple_code (last) != GIMPLE_COND)
return;
if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
return;
op = gimple_cond_lhs (last);
  if (decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
			    &param_ops))
{
code = gimple_cond_code (last);
inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
FOR_EACH_EDGE (e, ei, bb->succs)
{
enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
	     comparisons that are not EQ/NE instead of returning the proper
	     unordered code.  Be sure it is not confused with NON_CONSTANT.
	     And if the edge's target is the final block of the diamond CFG
	     of this conditional statement, we do not need to compute a
	     predicate for the edge because the final block's predicate must
	     be at least as broad as that of the first block of the
	     statement.  */
if (this_code != ERROR_MARK
&& !dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
{
predicate p
= add_condition (summary, params_summary, index,
param_type, &aggpos,
this_code, gimple_cond_rhs (last), param_ops);
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = p;
}
}
vec_free (param_ops);
}
if (TREE_CODE (op) != SSA_NAME)
return;
  /* Special case
     if (builtin_constant_p (op))
       constant_code
     else
       nonconstant_code.
     Here we can predicate nonconstant_code.  We can't really handle
     constant_code since we have no predicate for this and also the
     constant code is not known to be optimized away when the inliner
     doesn't see that the operand is constant.
     Other optimizers might think otherwise.  */
if (gimple_cond_code (last) != NE_EXPR
|| !integer_zerop (gimple_cond_rhs (last)))
return;
set_stmt = SSA_NAME_DEF_STMT (op);
if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
|| gimple_call_num_args (set_stmt) != 1)
return;
op2 = gimple_call_arg (set_stmt, 0);
  if (!decompose_param_expr (fbi, set_stmt, op2, &index, &param_type, &aggpos))
return;
FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
{
predicate p = add_condition (summary, params_summary, index,
param_type, &aggpos,
predicate::is_not_constant, NULL_TREE);
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = p;
}
}
/* If BB ends with a switch that we can turn into predicates, attach
   the corresponding predicates to the CFG edges.  */
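/* A small made-up example:

     switch (param_1)
       {
       case 0: ...		predicate "param_1 == 0"
       case 3 ... 5: ...	predicate "param_1 >= 3 && param_1 <= 5"
       default: ...		negation of all the case ranges above
       }
*/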
static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb)
{
gimple *lastg;
tree op;
int index;
struct agg_position_info aggpos;
edge e;
edge_iterator ei;
size_t n;
size_t case_idx;
tree param_type;
expr_eval_ops param_ops;
lastg = last_stmt (bb);
if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
return;
gswitch *last = as_a <gswitch *> (lastg);
op = gimple_switch_index (last);
  if (!decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
			     &param_ops))
return;
auto_vec<std::pair<tree, tree> > ranges;
tree type = TREE_TYPE (op);
int bound_limit = opt_for_fn (fbi->node->decl,
param_ipa_max_switch_predicate_bounds);
int bound_count = 0;
wide_int vr_wmin, vr_wmax;
value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
FOR_EACH_EDGE (e, ei, bb->succs)
{
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = false;
}
e = gimple_switch_edge (cfun, last, 0);
  /* Set BOUND_COUNT to the maximum count to bypass computing the predicate
     for the default case if its target basic block is the convergence point
     of all switch cases, which can be determined by checking whether it
     post-dominates the switch statement.  */
if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
bound_count = INT_MAX;
n = gimple_switch_num_labels (last);
for (case_idx = 1; case_idx < n; ++case_idx)
{
tree cl = gimple_switch_label (last, case_idx);
tree min = CASE_LOW (cl);
tree max = CASE_HIGH (cl);
predicate p;
e = gimple_switch_edge (cfun, last, case_idx);
      /* The case value might not have the same type as the switch
	 expression; extend the value based on the expression type.  */
if (TREE_TYPE (min) != type)
min = wide_int_to_tree (type, wi::to_wide (min));
if (!max)
max = min;
else if (TREE_TYPE (max) != type)
max = wide_int_to_tree (type, wi::to_wide (max));
      /* If the case's target basic block is the convergence point of all
	 switch cases, its predicate should be at least as broad as that of
	 the switch statement.  */
if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
p = true;
else if (min == max)
p = add_condition (summary, params_summary, index, param_type,
&aggpos, EQ_EXPR, min, param_ops);
else
{
predicate p1, p2;
p1 = add_condition (summary, params_summary, index, param_type,
&aggpos, GE_EXPR, min, param_ops);
	  p2 = add_condition (summary, params_summary, index, param_type,
&aggpos, LE_EXPR, max, param_ops);
p = p1 & p2;
}
*(class predicate *) e->aux
= p.or_with (summary->conds, *(class predicate *) e->aux);
      /* If there are too many disjoint case ranges, the predicate for the
	 default case might become too complicated, so add a limit here.  */
if (bound_count > bound_limit)
continue;
bool new_range = true;
if (!ranges.is_empty ())
{
wide_int curr_wmin = wi::to_wide (min);
wide_int last_wmax = wi::to_wide (ranges.last ().second);
	  /* Merge case ranges if they are contiguous.  */
if (curr_wmin == last_wmax + 1)
new_range = false;
else if (vr_type == VR_ANTI_RANGE)
{
	      /* If two disjoint case ranges can be connected by the
		 anti-range of the switch index, combine them into one
		 range.  */
if (wi::lt_p (vr_wmax, curr_wmin - 1, TYPE_SIGN (type)))
vr_type = VR_UNDEFINED;
else if (wi::le_p (vr_wmin, last_wmax + 1, TYPE_SIGN (type)))
new_range = false;
}
}
      /* Create/extend a case range.  We count the endpoints of the range
	 set; this number nearly equals the number of conditions that we will
	 create for the predicate of the default case.  */
if (new_range)
{
bound_count += (min == max) ? 1 : 2;
ranges.safe_push (std::make_pair (min, max));
}
else
{
bound_count += (ranges.last ().first == ranges.last ().second);
ranges.last ().second = max;
}
}
e = gimple_switch_edge (cfun, last, 0);
if (bound_count > bound_limit)
{
*(class predicate *) e->aux = true;
vec_free (param_ops);
return;
}
predicate p_seg = true;
predicate p_all = false;
if (vr_type != VR_RANGE)
{
vr_wmin = wi::to_wide (TYPE_MIN_VALUE (type));
vr_wmax = wi::to_wide (TYPE_MAX_VALUE (type));
}
  /* Construct a predicate to represent the default range set, which is the
     negation of all case ranges.  A case range is classified as containing
     either a single value or multiple values.  Suppose a run of case ranges
     as follows:
	 [D1...D2]  [S1] ... [Sn]  [D3...D4]
     To represent the default case's range set between two multi-value case
     ranges (from D2 to D3), we construct the predicate:
	 D2 < x < D3 && x != S1 && ... && x != Sn
   */
for (size_t i = 0; i < ranges.length (); i++)
{
tree min = ranges[i].first;
tree max = ranges[i].second;
if (min == max)
p_seg &= add_condition (summary, params_summary, index,
param_type, &aggpos, NE_EXPR,
min, param_ops);
else
{
	  /* Do not create a sub-predicate for a range that is beyond the
	     lower bound of the switch index.  */
if (wi::lt_p (vr_wmin, wi::to_wide (min), TYPE_SIGN (type)))
{
p_seg &= add_condition (summary, params_summary, index,
param_type, &aggpos,
LT_EXPR, min, param_ops);
p_all = p_all.or_with (summary->conds, p_seg);
}
	  /* Do not create a sub-predicate for a range that is beyond the
	     upper bound of the switch index.  */
if (wi::le_p (vr_wmax, wi::to_wide (max), TYPE_SIGN (type)))
{
p_seg = false;
break;
}
p_seg = add_condition (summary, params_summary, index,
param_type, &aggpos, GT_EXPR,
max, param_ops);
}
}
p_all = p_all.or_with (summary->conds, p_seg);
*(class predicate *) e->aux
= p_all.or_with (summary->conds, *(class predicate *) e->aux);
vec_free (param_ops);
}
/* For each BB in NODE, attach to its AUX pointer the predicate under
   which it is executable.  */
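/* An illustrative sketch (BB numbers and names made up):

     if (param_1 == 0)	;; BB2, predicate: true
       foo ();		;; BB3, predicate: param_1 == 0
     bar ();		;; BB4, predicate: true (post-dominates BB2)

   The entry block starts as true and the predicates are propagated
   forward until a fixed point is reached.  */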
static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
struct cgraph_node *node,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary)
{
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
bool done = false;
basic_block bb;
FOR_EACH_BB_FN (bb, my_function)
{
set_cond_stmt_execution_predicate (fbi, summary, params_summary, bb);
set_switch_stmt_execution_predicate (fbi, summary, params_summary, bb);
}
/* Entry block is always executable. */
ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= edge_predicate_pool.allocate ();
*(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true;
/* A simple dataflow propagation of predicates forward in the CFG.
TODO: work in reverse postorder. */
while (!done)
{
done = true;
FOR_EACH_BB_FN (bb, my_function)
{
predicate p = false;
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->src->aux)
{
predicate this_bb_predicate
= *(predicate *) e->src->aux;
if (e->aux)
this_bb_predicate &= (*(class predicate *) e->aux);
p = p.or_with (summary->conds, this_bb_predicate);
if (p == true)
break;
}
}
if (p != false)
{
basic_block pdom_bb;
if (!bb->aux)
{
done = false;
bb->aux = edge_predicate_pool.allocate ();
*((predicate *) bb->aux) = p;
}
else if (p != *(predicate *) bb->aux)
{
	      /* This OR operation is needed to ensure monotonic data flow
		 in case we hit the limit on the number of clauses and the
		 and/or operations above give approximate answers.  */
p = p.or_with (summary->conds, *(predicate *)bb->aux);
if (p != *(predicate *) bb->aux)
{
done = false;
*((predicate *) bb->aux) = p;
}
}
	  /* For a switch/if statement, we can OR-combine the predicates of
	     all its cases/branches to get the predicate for the basic block
	     at their convergence point, but sometimes this generates a very
	     complicated predicate.  Actually, we can get a simplified
	     predicate in another way by using the fact that the predicate
	     for a basic block must also hold for its post-dominators.
	     Specifically, the basic block at the convergence point of a
	     conditional statement should include the predicate of the
	     statement.  */
pdom_bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
if (pdom_bb == EXIT_BLOCK_PTR_FOR_FN (my_function) || !pdom_bb)
;
else if (!pdom_bb->aux)
{
done = false;
pdom_bb->aux = edge_predicate_pool.allocate ();
*((predicate *) pdom_bb->aux) = p;
}
else if (p != *(predicate *) pdom_bb->aux)
{
p = p.or_with (summary->conds, *(predicate *)pdom_bb->aux);
if (p != *(predicate *) pdom_bb->aux)
{
done = false;
*((predicate *) pdom_bb->aux) = p;
}
}
}
}
}
}
/* Return a predicate specifying when EXPR might have a result that is not
   a compile time constant.  */
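/* E.g. for EXPR "param_1 + x_2" (made-up names) the result is the
   condition "param_1 changed" OR-ed with the nonconstant predicate
   already recorded for x_2 in NONCONSTANT_NAMES.  */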
static predicate
will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
tree expr,
vec<predicate> nonconstant_names)
{
tree parm;
int index;
while (UNARY_CLASS_P (expr))
expr = TREE_OPERAND (expr, 0);
parm = unmodified_parm (fbi, NULL, expr, NULL);
if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
return add_condition (summary, params_summary, index, TREE_TYPE (parm), NULL,
predicate::changed, NULL_TREE);
if (is_gimple_min_invariant (expr))
return false;
if (TREE_CODE (expr) == SSA_NAME)
return nonconstant_names[SSA_NAME_VERSION (expr)];
if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
{
predicate p1
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 0),
nonconstant_names);
if (p1 == true)
return p1;
predicate p2
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 1),
nonconstant_names);
return p1.or_with (summary->conds, p2);
}
else if (TREE_CODE (expr) == COND_EXPR)
{
predicate p1
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 0),
nonconstant_names);
if (p1 == true)
return p1;
predicate p2
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 1),
nonconstant_names);
if (p2 == true)
return p2;
p1 = p1.or_with (summary->conds, p2);
p2 = will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 2),
nonconstant_names);
return p2.or_with (summary->conds, p1);
}
else if (TREE_CODE (expr) == CALL_EXPR)
return true;
else
{
debug_tree (expr);
gcc_unreachable ();
}
return false;
}
/* Return a predicate specifying when STMT might have a result that is not
   a compile time constant.  */
static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
gimple *stmt,
vec<predicate> nonconstant_names)
{
predicate p = true;
ssa_op_iter iter;
tree use;
tree param_type = NULL_TREE;
predicate op_non_const;
bool is_load;
int base_index;
struct agg_position_info aggpos;
/* What statements might be optimized away
when their arguments are constant. */
if (gimple_code (stmt) != GIMPLE_ASSIGN
&& gimple_code (stmt) != GIMPLE_COND
&& gimple_code (stmt) != GIMPLE_SWITCH
&& (gimple_code (stmt) != GIMPLE_CALL
|| !(gimple_call_flags (stmt) & ECF_CONST)))
return p;
/* Stores will stay anyway. */
if (gimple_store_p (stmt))
return p;
is_load = gimple_assign_load_p (stmt);
/* Loads can be optimized when the value is known. */
if (is_load)
{
tree op = gimple_assign_rhs1 (stmt);
      if (!decompose_param_expr (fbi, stmt, op, &base_index, &param_type,
&aggpos))
return p;
}
else
base_index = -1;
/* See if we understand all operands before we start
adding conditionals. */
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
tree parm = unmodified_parm (fbi, stmt, use, NULL);
/* For arguments we can build a condition. */
if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
continue;
if (TREE_CODE (use) != SSA_NAME)
return p;
      /* If we know when the operand is constant,
	 we can still say something useful.  */
if (nonconstant_names[SSA_NAME_VERSION (use)] != true)
continue;
return p;
}
if (is_load)
op_non_const =
add_condition (summary, params_summary,
base_index, param_type, &aggpos,
predicate::changed, NULL_TREE);
else
op_non_const = false;
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
tree parm = unmodified_parm (fbi, stmt, use, NULL);
int index;
if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
{
if (index != base_index)
p = add_condition (summary, params_summary, index,
TREE_TYPE (parm), NULL,
predicate::changed, NULL_TREE);
else
continue;
}
else
p = nonconstant_names[SSA_NAME_VERSION (use)];
op_non_const = p.or_with (summary->conds, op_non_const);
}
if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
&& gimple_op (stmt, 0)
&& TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
= op_non_const;
return op_non_const;
}
struct record_modified_bb_info
{
tree op;
bitmap bb_set;
gimple *stmt;
};
/* The value is initialized in INIT_BB and used in USE_BB.  We want to
   compute the probability that it changes between executions of USE_BB.
   INIT_BB->count / USE_BB->count is an estimate, but if INIT_BB
   is in a different loop nest, we can do better.
   This is all just an estimate.  In theory we would look for a minimal cut
   separating INIT_BB and USE_BB, but we only want to anticipate loop
   invariant motion anyway.  */
static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
class loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
if (l && l->header->count < init_bb->count)
return l->header;
return init_bb;
}
/* Callback of walk_aliased_vdefs.  Records basic blocks where the value may
   be set, except for info->stmt.  */
static bool
record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
struct record_modified_bb_info *info =
(struct record_modified_bb_info *) data;
if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
return false;
if (gimple_clobber_p (SSA_NAME_DEF_STMT (vdef)))
return false;
bitmap_set_bit (info->bb_set,
SSA_NAME_IS_DEFAULT_DEF (vdef)
? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
: get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (vdef)),
gimple_bb (info->stmt))->index);
if (dump_file)
{
fprintf (dump_file, " Param ");
print_generic_expr (dump_file, info->op, TDF_SLIM);
fprintf (dump_file, " changed at bb %i, minimal: %i stmt: ",
gimple_bb (SSA_NAME_DEF_STMT (vdef))->index,
get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (vdef)),
gimple_bb (info->stmt))->index);
print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (vdef), 0);
}
return false;
}
/* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
   of STMT will change between invocations of STMT.
   Value 0 is reserved for compile time invariants.
   For typical parameters it is REG_BR_PROB_BASE.  For loop invariants it
   ought to be REG_BR_PROB_BASE / estimated_iters.  */
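/* For example, if the argument is set once per function invocation but the
   call is made from a loop that iterates 100 times, the returned
   probability is roughly REG_BR_PROB_BASE / 100 (an illustrative figure).  */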
static int
param_change_prob (ipa_func_body_info *fbi, gimple *stmt, int i)
{
tree op = gimple_call_arg (stmt, i);
basic_block bb = gimple_bb (stmt);
if (TREE_CODE (op) == WITH_SIZE_EXPR)
op = TREE_OPERAND (op, 0);
tree base = get_base_address (op);
/* Global invariants never change. */
if (is_gimple_min_invariant (base))
return 0;
  /* We would have to do non-trivial analysis to really work out what
     the probability of the value changing is (i.e. when the init statement
     is in a sibling loop of the call).
     We make a conservative estimate: when the call is executed N times more
     often than the statement defining the value, we take the frequency
     1/N.  */
if (TREE_CODE (base) == SSA_NAME)
{
profile_count init_count;
if (!bb->count.nonzero_p ())
return REG_BR_PROB_BASE;
if (SSA_NAME_IS_DEFAULT_DEF (base))
init_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
else
init_count = get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (base)),
gimple_bb (stmt))->count;
if (init_count < bb->count)
return MAX ((init_count.to_sreal_scale (bb->count)
* REG_BR_PROB_BASE).to_int (), 1);
return REG_BR_PROB_BASE;
}
else
{
ao_ref refd;
profile_count max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
struct record_modified_bb_info info;
tree init = ctor_for_folding (base);
if (init != error_mark_node)
return 0;
if (!bb->count.nonzero_p ())
return REG_BR_PROB_BASE;
if (dump_file)
{
fprintf (dump_file, " Analyzing param change probability of ");
print_generic_expr (dump_file, op, TDF_SLIM);
fprintf (dump_file, "\n");
}
ao_ref_init (&refd, op);
info.op = op;
info.stmt = stmt;
info.bb_set = BITMAP_ALLOC (NULL);
int walked
= walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
NULL, NULL, fbi->aa_walk_budget);
if (walked < 0 || bitmap_bit_p (info.bb_set, bb->index))
{
if (dump_file)
{
if (walked < 0)
fprintf (dump_file, " Ran out of AA walking budget.\n");
else
fprintf (dump_file, " Set in same BB as used.\n");
}
BITMAP_FREE (info.bb_set);
return REG_BR_PROB_BASE;
}
bitmap_iterator bi;
unsigned index;
      /* Look up the most frequent update of the value and assume that it
	 dominates all the others; a precise analysis here is difficult.  */
EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
max = max.max (BASIC_BLOCK_FOR_FN (cfun, index)->count);
if (dump_file)
{
fprintf (dump_file, " Set with count ");
max.dump (dump_file);
fprintf (dump_file, " and used with count ");
bb->count.dump (dump_file);
fprintf (dump_file, " freq %f\n",
max.to_sreal_scale (bb->count).to_double ());
}
BITMAP_FREE (info.bb_set);
if (max < bb->count)
return MAX ((max.to_sreal_scale (bb->count)
* REG_BR_PROB_BASE).to_int (), 1);
return REG_BR_PROB_BASE;
}
}
/* Find whether a basic block BB is the final block of a (half) diamond CFG
   sub-graph and whether the predicate the condition depends on is known.  If
   so, return true and store the predicate in *P.  */
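/* That is, for a made-up diamond such as

     if (param_1 < 0)		;; first_bb
       x_2 = 1;
     else
       x_3 = 2;
     # x_4 = PHI <x_2, x_3>	;; BB

   the PHI result depends only on the condition, so *P becomes the
   predicate under which "param_1 < 0" is not a compile time constant.  */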
static bool
phi_result_unknown_predicate (ipa_func_body_info *fbi,
ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb,
predicate *p,
vec<predicate> nonconstant_names)
{
edge e;
edge_iterator ei;
basic_block first_bb = NULL;
gimple *stmt;
if (single_pred_p (bb))
{
*p = false;
return true;
}
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (single_succ_p (e->src))
{
if (!single_pred_p (e->src))
return false;
if (!first_bb)
first_bb = single_pred (e->src);
else if (single_pred (e->src) != first_bb)
return false;
}
else
{
if (!first_bb)
first_bb = e->src;
else if (e->src != first_bb)
return false;
}
}
if (!first_bb)
return false;
stmt = last_stmt (first_bb);
if (!stmt
|| gimple_code (stmt) != GIMPLE_COND
|| !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
return false;
*p = will_be_nonconstant_expr_predicate (fbi, summary, params_summary,
gimple_cond_lhs (stmt),
nonconstant_names);
if (*p == true)
return false;
else
return true;
}
/* Given a PHI statement in a function described by inline properties SUMMARY
and *P being the predicate describing whether the selected PHI argument is
known, store a predicate for the result of the PHI statement into
NONCONSTANT_NAMES, if possible. */
static void
predicate_for_phi_result (class ipa_fn_summary *summary, gphi *phi,
predicate *p,
vec<predicate> nonconstant_names)
{
unsigned i;
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree arg = gimple_phi_arg (phi, i)->def;
if (!is_gimple_min_invariant (arg))
{
gcc_assert (TREE_CODE (arg) == SSA_NAME);
*p = p->or_with (summary->conds,
nonconstant_names[SSA_NAME_VERSION (arg)]);
if (*p == true)
return;
}
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\t\tphi predicate: ");
p->dump (dump_file, summary->conds);
}
nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}
/* For a typical usage of __builtin_expect (a <= b, 1), we
   may introduce an extra relation stmt:
   With the builtin, we have
     t1 = a <= b;
     t2 = (long int) t1;
     t3 = __builtin_expect (t2, 1);
     if (t3 != 0)
       goto ...
   Without the builtin, we have
     if (a <= b)
       goto...
   This affects the size/time estimation and may have
   an impact on the earlier inlining.
   Here we find this pattern and fix up the cost later.  */
static gimple *
find_foldable_builtin_expect (basic_block bb)
{
gimple_stmt_iterator bsi;
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
gimple *stmt = gsi_stmt (bsi);
if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
|| gimple_call_builtin_p (stmt, BUILT_IN_EXPECT_WITH_PROBABILITY)
|| gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
{
tree var = gimple_call_lhs (stmt);
tree arg = gimple_call_arg (stmt, 0);
use_operand_p use_p;
gimple *use_stmt;
bool match = false;
bool done = false;
if (!var || !arg)
continue;
gcc_assert (TREE_CODE (var) == SSA_NAME);
while (TREE_CODE (arg) == SSA_NAME)
{
gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
if (!is_gimple_assign (stmt_tmp))
break;
switch (gimple_assign_rhs_code (stmt_tmp))
{
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
match = true;
done = true;
break;
CASE_CONVERT:
break;
default:
done = true;
break;
}
if (done)
break;
arg = gimple_assign_rhs1 (stmt_tmp);
}
if (match && single_imm_use (var, &use_p, &use_stmt)
&& gimple_code (use_stmt) == GIMPLE_COND)
return use_stmt;
}
}
return NULL;
}
/* Return true when the basic block contains only clobbers followed by RESX.
   Such BBs are kept around to make removal of dead stores possible in the
   presence of EH and will be optimized out by optimize_clobbers later in the
   game.
   NEED_EH is used to recurse in case the clobber has non-EH predecessors
   that can be clobber-only, too.  When it is false, the RESX is not necessary
   at the end of the basic block.  */
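/* A typical such block looks like this made-up dump snippet:

     <bb 13>:
     obj ={v} {CLOBBER};
     resx 2
*/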
static bool
clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
edge_iterator ei;
edge e;
if (need_eh)
{
if (gsi_end_p (gsi))
return false;
if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
return false;
gsi_prev (&gsi);
}
else if (!single_succ_p (bb))
return false;
for (; !gsi_end_p (gsi); gsi_prev (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
if (gimple_clobber_p (stmt))
continue;
if (gimple_code (stmt) == GIMPLE_LABEL)
break;
return false;
}
/* See if all predecessors are either throws or clobber only BBs. */
FOR_EACH_EDGE (e, ei, bb->preds)
if (!(e->flags & EDGE_EH)
&& !clobber_only_eh_bb_p (e->src, false))
return false;
return true;
}
/* Return true if STMT computes a floating point expression that may be
   affected by -ffast-math and similar flags.  */
static bool
fp_expression_p (gimple *stmt)
{
ssa_op_iter i;
tree op;
FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
if (FLOAT_TYPE_P (TREE_TYPE (op)))
return true;
return false;
}
/* Return true if T references a memory location that is local to the
   function (that is, dead after return) or read-only.  */
bool
refs_local_or_readonly_memory_p (tree t)
{
/* Non-escaping memory is fine. */
t = get_base_address (t);
if ((TREE_CODE (t) == MEM_REF
|| TREE_CODE (t) == TARGET_MEM_REF))
return points_to_local_or_readonly_memory_p (TREE_OPERAND (t, 0));
/* Automatic variables are fine. */
if (DECL_P (t)
&& auto_var_in_fn_p (t, current_function_decl))
return true;
/* Read-only variables are fine. */
if (DECL_P (t) && TREE_READONLY (t))
return true;
return false;
}
/* Return true if T is a pointer pointing to a memory location that is local
   to the function (that is, dead after return) or read-only.  */
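/* E.g. the address of a local variable (&buf) qualifies, while an SSA
   pointer that may alias global memory does not (illustrative cases).  */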
bool
points_to_local_or_readonly_memory_p (tree t)
{
/* See if memory location is clearly invalid. */
if (integer_zerop (t))
return flag_delete_null_pointer_checks;
if (TREE_CODE (t) == SSA_NAME)
return !ptr_deref_may_alias_global_p (t);
if (TREE_CODE (t) == ADDR_EXPR)
return refs_local_or_readonly_memory_p (TREE_OPERAND (t, 0));
return false;
}
/* Analyze the function body for NODE.
   EARLY indicates a run from the early optimization pipeline.  */
static void
analyze_function_body (struct cgraph_node *node, bool early)
{
sreal time = opt_for_fn (node->decl, param_uninlined_function_time);
/* Estimate static overhead for function prologue/epilogue and alignment. */
int size = opt_for_fn (node->decl, param_uninlined_function_insns);
  /* Benefits are scaled by the probability of elimination, which is in the
     range <0,2>.  */
basic_block bb;
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
sreal freq;
class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
class ipa_node_params *params_summary = early ? NULL : IPA_NODE_REF (node);
predicate bb_predicate;
struct ipa_func_body_info fbi;
vec<predicate> nonconstant_names = vNULL;
int nblocks, n;
int *order;
gimple *fix_builtin_expect_stmt;
gcc_assert (my_function && my_function->cfg);
gcc_assert (cfun == my_function);
  memset (&fbi, 0, sizeof (fbi));
vec_free (info->conds);
info->conds = NULL;
vec_free (info->size_time_table);
info->size_time_table = NULL;
  /* When optimizing and analyzing for the IPA inliner, initialize the loop
     optimizer so we can produce proper inline hints.
     When optimizing and analyzing for the early inliner, initialize node
     params so we can produce correct BB predicates.  */
if (opt_for_fn (node->decl, optimize))
{
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
if (!early)
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
else
{
ipa_check_create_node_params ();
ipa_initialize_node_params (node);
}
if (ipa_node_params_sum)
{
fbi.node = node;
fbi.info = IPA_NODE_REF (node);
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
fbi.param_count = count_formal_params (node->decl);
fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps);
nonconstant_names.safe_grow_cleared
(SSANAMES (my_function)->length (), true);
}
}
if (dump_file)
fprintf (dump_file, "\nAnalyzing function body size: %s\n",
node->dump_name ());
  /* When we run into the maximal number of entries, we assign everything to
     the constant truth case.  Be sure to have it in the list.  */
bb_predicate = true;
info->account_size_time (0, 0, bb_predicate, bb_predicate);
bb_predicate = predicate::not_inlined ();
info->account_size_time (opt_for_fn (node->decl,
param_uninlined_function_insns)
* ipa_fn_summary::size_scale,
opt_for_fn (node->decl,
param_uninlined_function_time),
bb_predicate,
bb_predicate);
if (fbi.info)
compute_bb_predicates (&fbi, node, info, params_summary);
const profile_count entry_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
nblocks = pre_and_rev_post_order_compute (NULL, order, false);
for (n = 0; n < nblocks; n++)
{
bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
freq = bb->count.to_sreal_scale (entry_count);
if (clobber_only_eh_bb_p (bb))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\n Ignoring BB %i;"
" it will be optimized away by cleanup_clobbers\n",
bb->index);
continue;
}
      /* TODO: Obviously predicates can be propagated down across the CFG.  */
if (fbi.info)
{
if (bb->aux)
bb_predicate = *(predicate *) bb->aux;
else
bb_predicate = false;
}
else
bb_predicate = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\n BB %i predicate:", bb->index);
bb_predicate.dump (dump_file, info->conds);
}
if (fbi.info && nonconstant_names.exists ())
{
predicate phi_predicate;
bool first_phi = true;
for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
gsi_next (&bsi))
{
if (first_phi
&& !phi_result_unknown_predicate (&fbi, info,
params_summary,
bb,
&phi_predicate,
nonconstant_names))
break;
first_phi = false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " ");
print_gimple_stmt (dump_file, gsi_stmt (bsi), 0);
}
predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
nonconstant_names);
}
}
fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb);
!gsi_end_p (bsi); gsi_next_nondebug (&bsi))
{
gimple *stmt = gsi_stmt (bsi);
int this_size = estimate_num_insns (stmt, &eni_size_weights);
int this_time = estimate_num_insns (stmt, &eni_time_weights);
int prob;
predicate will_be_nonconstant;
	  /* This relation stmt should be folded after we remove the
	     __builtin_expect call.  Adjust the cost here.  */
if (stmt == fix_builtin_expect_stmt)
{
this_size--;
this_time--;
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " ");
print_gimple_stmt (dump_file, stmt, 0);
fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
freq.to_double (), this_size,
this_time);
}
if (is_gimple_call (stmt)
&& !gimple_call_internal_p (stmt))
{
struct cgraph_edge *edge = node->get_edge (stmt);
ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      /* Special case: results of BUILT_IN_CONSTANT_P will always be
		 resolved as constant.  We, however, don't want to optimize
		 out the cgraph edges.  */
if (nonconstant_names.exists ()
&& gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
&& gimple_call_lhs (stmt)
&& TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
{
predicate false_p = false;
nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
= false_p;
}
if (ipa_node_params_sum)
{
int count = gimple_call_num_args (stmt);
int i;
if (count)
es->param.safe_grow_cleared (count, true);
for (i = 0; i < count; i++)
{
int prob = param_change_prob (&fbi, stmt, i);
gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
es->param[i].change_prob = prob;
es->param[i].points_to_local_or_readonly_memory
= points_to_local_or_readonly_memory_p
(gimple_call_arg (stmt, i));
}
}
es->call_stmt_size = this_size;
es->call_stmt_time = this_time;
es->loop_depth = bb_loop_depth (bb);
edge_set_predicate (edge, &bb_predicate);
if (edge->speculative)
{
cgraph_edge *indirect
= edge->speculative_call_indirect_edge ();
ipa_call_summary *es2
= ipa_call_summaries->get_create (indirect);
ipa_call_summaries->duplicate (edge, indirect,
es, es2);
		  /* EDGE is the first direct call.
		     Create and duplicate call summaries for multiple
		     speculative call targets.  */
for (cgraph_edge *direct
= edge->next_speculative_call_target ();
direct;
direct = direct->next_speculative_call_target ())
{
ipa_call_summary *es3
= ipa_call_summaries->get_create (direct);
ipa_call_summaries->duplicate (edge, direct,
es, es3);
}
}
}
	  /* TODO: When a conditional jump or switch is known to be constant,
	     but we did not translate it into the predicates, we really can
	     account for just the maximum of the possible paths.  */
if (fbi.info)
will_be_nonconstant
= will_be_nonconstant_predicate (&fbi, info, params_summary,
stmt, nonconstant_names);
else
will_be_nonconstant = true;
if (this_time || this_size)
{
sreal final_time = (sreal)this_time * freq;
prob = eliminated_by_inlining_prob (&fbi, stmt);
if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"\t\t50%% will be eliminated by inlining\n");
if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
class predicate p = bb_predicate & will_be_nonconstant;
/* We can ignore the statement when we have proved it will never be
executed, but we cannot do that for call statements because their
edges are accounted for specially. */
if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false)
{
time += final_time;
size += this_size;
}
/* We account everything but the calls. Calls have their own
size/time info attached to cgraph edges. This is necessary
in order to make the cost disappear after inlining. */
if (!is_gimple_call (stmt))
{
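/* PROB is on a 0..2 scale: 0 = never eliminated by inlining, 1 =
eliminated in about half of the cases, 2 = always eliminated (see the
dumps above). The size/time is split accordingly between the
not-inlined case and the general case below. */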
if (prob)
{
predicate ip = bb_predicate & predicate::not_inlined ();
info->account_size_time (this_size * prob,
(final_time * prob) / 2, ip,
p);
}
if (prob != 2)
info->account_size_time (this_size * (2 - prob),
(final_time * (2 - prob) / 2),
bb_predicate,
p);
}
if (!info->fp_expressions && fp_expression_p (stmt))
{
info->fp_expressions = true;
if (dump_file)
fprintf (dump_file, " fp_expression set\n");
}
}
/* Account cost of address calculations in the statements. */
for (unsigned int i = 0; i < gimple_num_ops (stmt); i++)
{
for (tree op = gimple_op (stmt, i);
op && handled_component_p (op);
op = TREE_OPERAND (op, 0))
if ((TREE_CODE (op) == ARRAY_REF
|| TREE_CODE (op) == ARRAY_RANGE_REF)
&& TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
{
predicate p = bb_predicate;
if (fbi.info)
p = p & will_be_nonconstant_expr_predicate
(&fbi, info, params_summary,
TREE_OPERAND (op, 1),
nonconstant_names);
if (p != false)
{
time += freq;
size += 1;
if (dump_file)
fprintf (dump_file,
"\t\tAccounting address calculation.\n");
info->account_size_time (ipa_fn_summary::size_scale,
freq,
bb_predicate,
p);
}
}
}
}
}
free (order);
if (nonconstant_names.exists () && !early)
{
ipa_fn_summary *s = ipa_fn_summaries->get (node);
class loop *loop;
unsigned max_loop_predicates = opt_for_fn (node->decl,
param_ipa_max_loop_predicates);
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loops_dump (dump_file, NULL, 0);
scev_initialize ();
FOR_EACH_LOOP (loop, 0)
{
predicate loop_iterations = true;
sreal header_freq;
edge ex;
unsigned int j;
class tree_niter_desc niter_desc;
if (!loop->header->aux)
continue;
profile_count phdr_count = loop_preheader_edge (loop)->count ();
sreal phdr_freq = phdr_count.to_sreal_scale (entry_count);
bb_predicate = *(predicate *) loop->header->aux;
auto_vec<edge> exits = get_loop_exit_edges (loop);
FOR_EACH_VEC_ELT (exits, j, ex)
if (number_of_iterations_exit (loop, ex, &niter_desc, false)
&& !is_gimple_min_invariant (niter_desc.niter))
{
predicate will_be_nonconstant
= will_be_nonconstant_expr_predicate (&fbi, info,
params_summary,
niter_desc.niter,
nonconstant_names);
if (will_be_nonconstant != true)
will_be_nonconstant = bb_predicate & will_be_nonconstant;
if (will_be_nonconstant != true
&& will_be_nonconstant != false)
loop_iterations &= will_be_nonconstant;
}
add_freqcounting_predicate (&s->loop_iterations, loop_iterations,
phdr_freq, max_loop_predicates);
}
/* To avoid quadratic behavior we analyze stride predicates only
with respect to the containing loop. Thus we simply iterate
over all defs in the outermost loop body. */
for (loop = loops_for_fn (cfun)->tree_root->inner;
loop != NULL; loop = loop->next)
{
predicate loop_stride = true;
basic_block *body = get_loop_body (loop);
profile_count phdr_count = loop_preheader_edge (loop)->count ();
sreal phdr_freq = phdr_count.to_sreal_scale (entry_count);
for (unsigned i = 0; i < loop->num_nodes; i++)
{
gimple_stmt_iterator gsi;
if (!body[i]->aux)
continue;
bb_predicate = *(predicate *) body[i]->aux;
for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (!is_gimple_assign (stmt))
continue;
tree def = gimple_assign_lhs (stmt);
if (TREE_CODE (def) != SSA_NAME)
continue;
affine_iv iv;
if (!simple_iv (loop_containing_stmt (stmt),
loop_containing_stmt (stmt),
def, &iv, true)
|| is_gimple_min_invariant (iv.step))
continue;
predicate will_be_nonconstant
= will_be_nonconstant_expr_predicate (&fbi, info,
params_summary,
iv.step,
nonconstant_names);
if (will_be_nonconstant != true)
will_be_nonconstant = bb_predicate & will_be_nonconstant;
if (will_be_nonconstant != true
&& will_be_nonconstant != false)
loop_stride = loop_stride & will_be_nonconstant;
}
}
add_freqcounting_predicate (&s->loop_strides, loop_stride,
phdr_freq, max_loop_predicates);
free (body);
}
scev_finalize ();
}
FOR_ALL_BB_FN (bb, my_function)
{
edge e;
edge_iterator ei;
if (bb->aux)
edge_predicate_pool.remove ((predicate *)bb->aux);
bb->aux = NULL;
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->aux)
edge_predicate_pool.remove ((predicate *) e->aux);
e->aux = NULL;
}
}
ipa_fn_summary *s = ipa_fn_summaries->get (node);
ipa_size_summary *ss = ipa_size_summaries->get (node);
s->time = time;
ss->self_size = size;
nonconstant_names.release ();
ipa_release_body_info (&fbi);
if (opt_for_fn (node->decl, optimize))
{
if (!early)
loop_optimizer_finalize ();
else if (!ipa_edge_args_sum)
ipa_free_all_node_params ();
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
}
if (dump_file)
{
fprintf (dump_file, "\n");
ipa_dump_fn_summary (dump_file, node);
}
}
/* Compute function summary.
EARLY is true when we compute parameters during early opts. */
void
compute_fn_summary (struct cgraph_node *node, bool early)
{
HOST_WIDE_INT self_stack_size;
struct cgraph_edge *e;
gcc_assert (!node->inlined_to);
if (!ipa_fn_summaries)
ipa_fn_summary_alloc ();
/* Create a new ipa_fn_summary. */
((ipa_fn_summary_t *)ipa_fn_summaries)->remove_callees (node);
ipa_fn_summaries->remove (node);
class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
class ipa_size_summary *size_info = ipa_size_summaries->get_create (node);
/* Estimate the stack size for the function if we're optimizing. */
self_stack_size = optimize && !node->thunk.thunk_p
? estimated_stack_frame_size (node) : 0;
size_info->estimated_self_stack_size = self_stack_size;
info->estimated_stack_size = self_stack_size;
if (node->thunk.thunk_p)
{
ipa_call_summary *es = ipa_call_summaries->get_create (node->callees);
predicate t = true;
node->can_change_signature = false;
es->call_stmt_size = eni_size_weights.call_cost;
es->call_stmt_time = eni_time_weights.call_cost;
info->account_size_time (ipa_fn_summary::size_scale
* opt_for_fn (node->decl,
param_uninlined_function_thunk_insns),
opt_for_fn (node->decl,
param_uninlined_function_thunk_time), t, t);
t = predicate::not_inlined ();
info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
ipa_update_overall_fn_summary (node);
size_info->self_size = size_info->size;
if (stdarg_p (TREE_TYPE (node->decl)))
{
info->inlinable = false;
node->callees->inline_failed = CIF_VARIADIC_THUNK;
}
else
info->inlinable = true;
}
else
{
/* Even is_gimple_min_invariant relies on current_function_decl. */
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
/* During IPA profile merging we may be called w/o virtual SSA form
built. */
update_ssa (TODO_update_ssa_only_virtuals);
/* Can this function be inlined at all? */
if (!opt_for_fn (node->decl, optimize)
&& !lookup_attribute ("always_inline",
DECL_ATTRIBUTES (node->decl)))
info->inlinable = false;
else
info->inlinable = tree_inlinable_function_p (node->decl);
/* Type attributes can use parameter indices to describe them. */
if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))
/* Likewise for #pragma omp declare simd functions or functions
with simd attribute. */
|| lookup_attribute ("omp declare simd",
DECL_ATTRIBUTES (node->decl)))
node->can_change_signature = false;
else
{
/* Otherwise, inlinable functions can always change their signature. */
if (info->inlinable)
node->can_change_signature = true;
else
{
/* Functions calling __builtin_apply_args or va_start cannot change
their signature. */
for (e = node->callees; e; e = e->next_callee)
{
tree cdecl = e->callee->decl;
if (fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS)
|| fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
break;
}
node->can_change_signature = !e;
}
}
analyze_function_body (node, early);
pop_cfun ();
}
/* Inlining characteristics are maintained by the cgraph_mark_inline. */
size_info->size = size_info->self_size;
info->estimated_stack_size = size_info->estimated_self_stack_size;
/* The code above should compute exactly the same result as
ipa_update_overall_fn_summary, but because the computation happens in
a different order, roundoff errors result in slight changes. */
ipa_update_overall_fn_summary (node);
/* In LTO mode we may have speculative edges set. */
gcc_assert (in_lto_p || size_info->size == size_info->self_size);
}
/* Compute parameters of functions used by inliner using
current_function_decl. */
static unsigned int
compute_fn_summary_for_current (void)
{
compute_fn_summary (cgraph_node::get (current_function_decl), true);
return 0;
}
/* Estimate benefit devirtualizing indirect edge IE and return true if it can
be devirtualized and inlined, provided m_known_vals, m_known_contexts and
m_known_aggs in AVALS. Return false straight away if AVALS is NULL. */
static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
int *size, int *time,
ipa_call_arg_values *avals)
{
tree target;
struct cgraph_node *callee;
class ipa_fn_summary *isummary;
enum availability avail;
bool speculative;
if (!avals
|| (!avals->m_known_vals.length () && !avals->m_known_contexts.length ()))
return false;
if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
return false;
target = ipa_get_indirect_edge_target (ie, avals, &speculative);
if (!target || speculative)
return false;
/* Account for difference in cost between indirect and direct calls. */
*size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
*time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
gcc_checking_assert (*time >= 0);
gcc_checking_assert (*size >= 0);
callee = cgraph_node::get (target);
if (!callee || !callee->definition)
return false;
callee = callee->function_symbol (&avail);
if (avail < AVAIL_AVAILABLE)
return false;
isummary = ipa_fn_summaries->get (callee);
if (isummary == NULL)
return false;
return isummary->inlinable;
}
/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for the size and time
needed to handle edge E. Set HINTS accordingly if the edge may be
devirtualized. AVALS, if non-NULL, describes the context of the call
site as far as values of parameters are concerned. */
static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
sreal *time, ipa_call_arg_values *avals,
ipa_hints *hints)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
int call_size = es->call_stmt_size;
int call_time = es->call_stmt_time;
int cur_size;
if (!e->callee && hints && e->maybe_hot_p ()
&& estimate_edge_devirt_benefit (e, &call_size, &call_time, avals))
*hints |= INLINE_HINT_indirect_call;
cur_size = call_size * ipa_fn_summary::size_scale;
*size += cur_size;
if (min_size)
*min_size += cur_size;
if (time)
*time += ((sreal)call_time) * e->sreal_frequency ();
}
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
calls in NODE. POSSIBLE_TRUTHS and AVALS describe the context of the call
site.
Helper for estimate_calls_size_and_time, which does the same job but
is in most cases faster. */
static void
estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
int *min_size, sreal *time,
ipa_hints *hints,
clause_t possible_truths,
ipa_call_arg_values *avals)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
{
gcc_checking_assert (!ipa_call_summaries->get (e));
estimate_calls_size_and_time_1 (e->callee, size, min_size, time,
hints, possible_truths, avals);
continue;
}
class ipa_call_summary *es = ipa_call_summaries->get (e);
/* Do not care about zero sized builtins. */
if (!es->call_stmt_size)
{
gcc_checking_assert (!es->call_stmt_time);
continue;
}
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
{
/* Predicates of calls shall not use NOT_CHANGED codes,
so we do not need to compute probabilities. */
estimate_edge_size_and_time (e, size,
es->predicate ? NULL : min_size,
time, avals, hints);
}
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
estimate_edge_size_and_time (e, size,
es->predicate ? NULL : min_size,
time, avals, hints);
}
}
/* Populate sum->call_size_time_table for edges from NODE. */
static void
summarize_calls_size_and_time (struct cgraph_node *node,
ipa_fn_summary *sum)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
{
gcc_checking_assert (!ipa_call_summaries->get (e));
summarize_calls_size_and_time (e->callee, sum);
continue;
}
int size = 0;
sreal time = 0;
estimate_edge_size_and_time (e, &size, NULL, &time, NULL, NULL);
struct predicate pred = true;
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (es->predicate)
pred = *es->predicate;
sum->account_size_time (size, time, pred, pred, true);
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
int size = 0;
sreal time = 0;
estimate_edge_size_and_time (e, &size, NULL, &time, NULL, NULL);
struct predicate pred = true;
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (es->predicate)
pred = *es->predicate;
sum->account_size_time (size, time, pred, pred, true);
}
}
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
calls in NODE. POSSIBLE_TRUTHS and AVALS (the latter if non-NULL) describe
context of the call site. */
static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
int *min_size, sreal *time,
ipa_hints *hints,
clause_t possible_truths,
ipa_call_arg_values *avals)
{
class ipa_fn_summary *sum = ipa_fn_summaries->get (node);
bool use_table = true;
gcc_assert (node->callees || node->indirect_calls);
/* During early inlining we do not calculate info for very
large functions and thus there is no need for producing
summaries. */
if (!ipa_node_params_sum)
use_table = false;
/* Do not calculate summaries for simple wrappers; it is a waste
of memory. */
else if (node->callees && node->indirect_calls
&& node->callees->inline_failed && !node->callees->next_callee)
use_table = false;
/* If there is an indirect edge that may be optimized, we need
to go the slow way. */
else if (avals && hints
&& (avals->m_known_vals.length ()
|| avals->m_known_contexts.length ()
|| avals->m_known_aggs.length ()))
{
class ipa_node_params *params_summary = IPA_NODE_REF (node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
for (unsigned int i = 0; i < nargs && use_table; i++)
{
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& (avals->safe_sval_at (i)
|| (avals->m_known_aggs.length () > i
&& avals->m_known_aggs[i].items.length ())))
use_table = false;
else if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
&& (avals->m_known_contexts.length () > i
&& !avals->m_known_contexts[i].useless_p ()))
use_table = false;
}
}
/* Fast path is via the call size time table. */
if (use_table)
{
/* Build summary if it is absent. */
if (!sum->call_size_time_table)
{
predicate true_pred = true;
sum->account_size_time (0, 0, true_pred, true_pred, true);
summarize_calls_size_and_time (node, sum);
}
int old_size = *size;
sreal old_time = time ? *time : 0;
if (min_size)
*min_size += (*sum->call_size_time_table)[0].size;
unsigned int i;
size_time_entry *e;
/* Walk the table and account sizes and times. */
for (i = 0; vec_safe_iterate (sum->call_size_time_table, i, &e);
i++)
if (e->exec_predicate.evaluate (possible_truths))
{
*size += e->size;
if (time)
*time += e->time;
}
/* Be careful and see if both methods agree. */
if ((flag_checking || dump_file)
/* Do not try to sanity check when we know we lost some
precision. */
&& sum->call_size_time_table->length ()
< ipa_fn_summary::max_size_time_table_size)
{
estimate_calls_size_and_time_1 (node, &old_size, NULL, &old_time, NULL,
possible_truths, avals);
gcc_assert (*size == old_size);
if (time && (*time - old_time > 1 || *time - old_time < -1)
&& dump_file)
fprintf (dump_file, "Time mismatch in call summary %f!=%f\n",
old_time.to_double (),
time->to_double ());
}
}
/* Slow path by walking all edges. */
else
estimate_calls_size_and_time_1 (node, size, min_size, time, hints,
possible_truths, avals);
}
/* Main constructor for ipa call context. Memory allocation of ARG_VALUES
is owned by the caller. INLINE_PARAM_SUMMARY is also owned by the
caller. */
ipa_call_context::ipa_call_context (cgraph_node *node, clause_t possible_truths,
clause_t nonspec_possible_truths,
vec<inline_param_summary>
inline_param_summary,
ipa_auto_call_arg_values *arg_values)
: m_node (node), m_possible_truths (possible_truths),
m_nonspec_possible_truths (nonspec_possible_truths),
m_inline_param_summary (inline_param_summary),
m_avals (arg_values)
{
}
/* Set THIS to be a duplicate of CTX. Copy all relevant info. */
void
ipa_cached_call_context::duplicate_from (const ipa_call_context &ctx)
{
m_node = ctx.m_node;
m_possible_truths = ctx.m_possible_truths;
m_nonspec_possible_truths = ctx.m_nonspec_possible_truths;
class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
m_inline_param_summary = vNULL;
/* Copy the info only if there is at least one useful entry. */
if (ctx.m_inline_param_summary.exists ())
{
unsigned int n = MIN (ctx.m_inline_param_summary.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_ipa_predicates (params_summary, i)
&& !ctx.m_inline_param_summary[i].useless_p ())
{
m_inline_param_summary
= ctx.m_inline_param_summary.copy ();
break;
}
}
m_avals.m_known_vals = vNULL;
if (ctx.m_avals.m_known_vals.exists ())
{
unsigned int n = MIN (ctx.m_avals.m_known_vals.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& ctx.m_avals.m_known_vals[i])
{
m_avals.m_known_vals = ctx.m_avals.m_known_vals.copy ();
break;
}
}
m_avals.m_known_contexts = vNULL;
if (ctx.m_avals.m_known_contexts.exists ())
{
unsigned int n = MIN (ctx.m_avals.m_known_contexts.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
&& !ctx.m_avals.m_known_contexts[i].useless_p ())
{
m_avals.m_known_contexts = ctx.m_avals.m_known_contexts.copy ();
break;
}
}
m_avals.m_known_aggs = vNULL;
if (ctx.m_avals.m_known_aggs.exists ())
{
unsigned int n = MIN (ctx.m_avals.m_known_aggs.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& !ctx.m_avals.m_known_aggs[i].is_empty ())
{
m_avals.m_known_aggs
= ipa_copy_agg_values (ctx.m_avals.m_known_aggs);
break;
}
}
m_avals.m_known_value_ranges = vNULL;
}
/* Release memory used by the known_vals/contexts/aggs vectors and by
inline_param_summary. */
void
ipa_cached_call_context::release ()
{
/* See whether the context was initialized in the first place. */
if (!m_node)
return;
ipa_release_agg_values (m_avals.m_known_aggs, true);
m_avals.m_known_vals.release ();
m_avals.m_known_contexts.release ();
m_inline_param_summary.release ();
}
/* Return true if CTX describes the same call context as THIS. */
bool
ipa_call_context::equal_to (const ipa_call_context &ctx)
{
if (m_node != ctx.m_node
|| m_possible_truths != ctx.m_possible_truths
|| m_nonspec_possible_truths != ctx.m_nonspec_possible_truths)
return false;
class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
if (m_inline_param_summary.exists () || ctx.m_inline_param_summary.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_ipa_predicates (params_summary, i))
continue;
if (i >= m_inline_param_summary.length ()
|| m_inline_param_summary[i].useless_p ())
{
if (i < ctx.m_inline_param_summary.length ()
&& !ctx.m_inline_param_summary[i].useless_p ())
return false;
continue;
}
if (i >= ctx.m_inline_param_summary.length ()
|| ctx.m_inline_param_summary[i].useless_p ())
{
if (i < m_inline_param_summary.length ()
&& !m_inline_param_summary[i].useless_p ())
return false;
continue;
}
if (!m_inline_param_summary[i].equal_to
(ctx.m_inline_param_summary[i]))
return false;
}
}
if (m_avals.m_known_vals.exists () || ctx.m_avals.m_known_vals.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_indirect_call (params_summary, i))
continue;
if (i >= m_avals.m_known_vals.length () || !m_avals.m_known_vals[i])
{
if (i < ctx.m_avals.m_known_vals.length ()
&& ctx.m_avals.m_known_vals[i])
return false;
continue;
}
if (i >= ctx.m_avals.m_known_vals.length ()
|| !ctx.m_avals.m_known_vals[i])
{
if (i < m_avals.m_known_vals.length () && m_avals.m_known_vals[i])
return false;
continue;
}
if (m_avals.m_known_vals[i] != ctx.m_avals.m_known_vals[i])
return false;
}
}
if (m_avals.m_known_contexts.exists ()
|| ctx.m_avals.m_known_contexts.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_polymorphic_call (params_summary, i))
continue;
if (i >= m_avals.m_known_contexts.length ()
|| m_avals.m_known_contexts[i].useless_p ())
{
if (i < ctx.m_avals.m_known_contexts.length ()
&& !ctx.m_avals.m_known_contexts[i].useless_p ())
return false;
continue;
}
if (i >= ctx.m_avals.m_known_contexts.length ()
|| ctx.m_avals.m_known_contexts[i].useless_p ())
{
if (i < m_avals.m_known_contexts.length ()
&& !m_avals.m_known_contexts[i].useless_p ())
return false;
continue;
}
if (!m_avals.m_known_contexts[i].equal_to
(ctx.m_avals.m_known_contexts[i]))
return false;
}
}
if (m_avals.m_known_aggs.exists () || ctx.m_avals.m_known_aggs.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_indirect_call (params_summary, i))
continue;
if (i >= m_avals.m_known_aggs.length ()
|| m_avals.m_known_aggs[i].is_empty ())
{
if (i < ctx.m_avals.m_known_aggs.length ()
&& !ctx.m_avals.m_known_aggs[i].is_empty ())
return false;
continue;
}
if (i >= ctx.m_avals.m_known_aggs.length ()
|| ctx.m_avals.m_known_aggs[i].is_empty ())
{
if (i < m_avals.m_known_aggs.length ()
&& !m_avals.m_known_aggs[i].is_empty ())
return false;
continue;
}
if (!m_avals.m_known_aggs[i].equal_to (ctx.m_avals.m_known_aggs[i]))
return false;
}
}
return true;
}
/* Fill in the selected fields in ESTIMATES with value estimated for call in
this context. Always compute size and min_size. Only compute time and
nonspecialized_time if EST_TIMES is true. Only compute hints if EST_HINTS
is true. */
void
ipa_call_context::estimate_size_and_time (ipa_call_estimates *estimates,
bool est_times, bool est_hints)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (m_node);
size_time_entry *e;
int size = 0;
sreal time = 0;
int min_size = 0;
ipa_hints hints = 0;
sreal loops_with_known_iterations = 0;
sreal loops_with_known_strides = 0;
int i;
if (dump_file && (dump_flags & TDF_DETAILS))
{
bool found = false;
fprintf (dump_file, " Estimating body: %s\n"
" Known to be false: ", m_node->dump_name ());
for (i = predicate::not_inlined_condition;
i < (predicate::first_dynamic_condition
+ (int) vec_safe_length (info->conds)); i++)
if (!(m_possible_truths & (1 << i)))
{
if (found)
fprintf (dump_file, ", ");
found = true;
dump_condition (dump_file, info->conds, i);
}
}
if (m_node->callees || m_node->indirect_calls)
estimate_calls_size_and_time (m_node, &size, &min_size,
est_times ? &time : NULL,
est_hints ? &hints : NULL, m_possible_truths,
&m_avals);
sreal nonspecialized_time = time;
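/* Entry 0 of the size_time_table represents the unconditional part of
the function (both of its predicates are true; see the checking
asserts below), so it gives a lower bound for the size. */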
min_size += (*info->size_time_table)[0].size;
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
bool exec = e->exec_predicate.evaluate (m_nonspec_possible_truths);
/* Because predicates are conservative, it can happen that nonconst is 1
but exec is 0. */
if (exec)
{
bool nonconst = e->nonconst_predicate.evaluate (m_possible_truths);
gcc_checking_assert (e->time >= 0);
gcc_checking_assert (time >= 0);
/* We compute the specialized size only, because the size of the
nonspecialized copy is context independent.
The difference between nonspecialized and specialized execution is
that the nonspecialized copy will not have optimized out the
computations known to be constant in the specialized setting. */
if (nonconst)
size += e->size;
if (!est_times)
continue;
nonspecialized_time += e->time;
if (!nonconst)
;
else if (!m_inline_param_summary.exists ())
{
if (nonconst)
time += e->time;
}
else
{
int prob = e->nonconst_predicate.probability
(info->conds, m_possible_truths,
m_inline_param_summary);
gcc_checking_assert (prob >= 0);
gcc_checking_assert (prob <= REG_BR_PROB_BASE);
if (prob == REG_BR_PROB_BASE)
time += e->time;
else
time += e->time * prob / REG_BR_PROB_BASE;
}
gcc_checking_assert (time >= 0);
}
}
gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
gcc_checking_assert (min_size >= 0);
gcc_checking_assert (size >= 0);
gcc_checking_assert (time >= 0);
/* nonspecialized_time should always be bigger than the specialized
time; roundoff issues, however, may get in the way. */
gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
/* Roundoff issues may make specialized time bigger than nonspecialized
time. We do not really want that to happen because some heuristics
may get confused by seeing negative speedups. */
if (time > nonspecialized_time)
time = nonspecialized_time;
if (est_hints)
{
if (info->scc_no)
hints |= INLINE_HINT_in_scc;
if (DECL_DECLARED_INLINE_P (m_node->decl))
hints |= INLINE_HINT_declared_inline;
ipa_freqcounting_predicate *fcp;
for (i = 0; vec_safe_iterate (info->loop_iterations, i, &fcp); i++)
if (!fcp->predicate->evaluate (m_possible_truths))
{
hints |= INLINE_HINT_loop_iterations;
loops_with_known_iterations += fcp->freq;
}
estimates->loops_with_known_iterations = loops_with_known_iterations;
for (i = 0; vec_safe_iterate (info->loop_strides, i, &fcp); i++)
if (!fcp->predicate->evaluate (m_possible_truths))
{
hints |= INLINE_HINT_loop_stride;
loops_with_known_strides += fcp->freq;
}
estimates->loops_with_known_strides = loops_with_known_strides;
}
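/* Sizes were accumulated in size_scale units; convert them back to the
instruction-count units used by the rest of the inliner (RDIV rounds
to the nearest integer). */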
size = RDIV (size, ipa_fn_summary::size_scale);
min_size = RDIV (min_size, ipa_fn_summary::size_scale);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\n size:%i", (int) size);
if (est_times)
fprintf (dump_file, " time:%f nonspec time:%f",
time.to_double (), nonspecialized_time.to_double ());
if (est_hints)
fprintf (dump_file, " loops with known iterations:%f "
"known strides:%f", loops_with_known_iterations.to_double (),
loops_with_known_strides.to_double ());
fprintf (dump_file, "\n");
}
if (est_times)
{
estimates->time = time;
estimates->nonspecialized_time = nonspecialized_time;
}
estimates->size = size;
estimates->min_size = min_size;
if (est_hints)
estimates->hints = hints;
return;
}
/* Estimate size and time needed to execute NODE assuming that its
parameters known to be constant at the call site are propagated.
AVALS describes the assumed known constant values and contexts of
the parameters. */
void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
ipa_auto_call_arg_values *avals,
ipa_call_estimates *estimates)
{
clause_t clause, nonspec_clause;
evaluate_conditions_for_known_args (node, false, avals, &clause,
&nonspec_clause);
ipa_call_context ctx (node, clause, nonspec_clause, vNULL, avals);
ctx.estimate_size_and_time (estimates);
}
/* Return the stack frame offset where the frame of NODE is supposed to
start inside the function it was inlined into.
Return 0 for functions that are not inlined. */
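/* Illustrative example: if C was inlined into B and B into A, the frame
of C starts at self-stack-size (A) + self-stack-size (B). */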
HOST_WIDE_INT
ipa_get_stack_frame_offset (struct cgraph_node *node)
{
HOST_WIDE_INT offset = 0;
if (!node->inlined_to)
return 0;
node = node->callers->caller;
while (true)
{
offset += ipa_size_summaries->get (node)->estimated_self_stack_size;
if (!node->inlined_to)
return offset;
node = node->callers->caller;
}
}
/* Update summary information of inline clones after inlining.
Compute peak stack usage. */
static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
struct cgraph_edge *e;
ipa_propagate_frequency (node);
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
inline_update_callee_summaries (e->callee, depth);
else
ipa_call_summaries->get (e)->loop_depth += depth;
}
for (e = node->indirect_calls; e; e = e->next_callee)
ipa_call_summaries->get (e)->loop_depth += depth;
}
/* Update change_prob and points_to_local_or_readonly_memory of EDGE after
INLINED_EDGE has been inlined.
When function A is inlined in B, and A calls C with a parameter that
changes with probability PROB1, and that parameter is known to be a
passthrough of an argument of B that changes with probability PROB2,
the probability of change is now PROB1*PROB2. */
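/* Illustrative example: with REG_BR_PROB_BASE == 10000, PROB1 = 5000
(50%) and PROB2 = 2000 (20%) combine to 5000 * 2000 / 10000 = 1000,
i.e. a 10% probability of change. */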
static void
remap_edge_params (struct cgraph_edge *inlined_edge,
struct cgraph_edge *edge)
{
if (ipa_node_params_sum)
{
int i;
class ipa_edge_args *args = IPA_EDGE_REF (edge);
if (!args)
return;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
class ipa_call_summary *inlined_es
= ipa_call_summaries->get (inlined_edge);
if (es->param.length () == 0)
return;
for (i = 0; i < ipa_get_cs_argument_count (args); i++)
{
struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
int id = jfunc->type == IPA_JF_PASS_THROUGH
? ipa_get_jf_pass_through_formal_id (jfunc)
: ipa_get_jf_ancestor_formal_id (jfunc);
if (id < (int) inlined_es->param.length ())
{
int prob1 = es->param[i].change_prob;
int prob2 = inlined_es->param[id].change_prob;
int prob = combine_probabilities (prob1, prob2);
if (prob1 && prob2 && !prob)
prob = 1;
es->param[i].change_prob = prob;
if (inlined_es
->param[id].points_to_local_or_readonly_memory)
es->param[i].points_to_local_or_readonly_memory = true;
}
if (!es->param[i].points_to_local_or_readonly_memory
&& jfunc->type == IPA_JF_CONST
&& points_to_local_or_readonly_memory_p
(ipa_get_jf_constant (jfunc)))
es->param[i].points_to_local_or_readonly_memory = true;
}
}
}
}
/* Update edge summaries of NODE after INLINED_EDGE has been inlined.
Remap predicates of callees of NODE. The rest of the arguments match
predicate::remap_after_inlining.
Also update change probabilities. */
static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
struct cgraph_node *node,
class ipa_fn_summary *info,
class ipa_node_params *params_summary,
class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<HOST_WIDE_INT> offset_map,
clause_t possible_truths,
predicate *toplev_predicate)
{
struct cgraph_edge *e, *next;
for (e = node->callees; e; e = next)
{
predicate p;
next = e->next_callee;
if (e->inline_failed)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
remap_edge_params (inlined_edge, e);
if (es->predicate)
{
p = es->predicate->remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, possible_truths,
*toplev_predicate);
edge_set_predicate (e, &p);
}
else
edge_set_predicate (e, toplev_predicate);
}
else
remap_edge_summaries (inlined_edge, e->callee, info,
params_summary, callee_info,
operand_map, offset_map, possible_truths,
toplev_predicate);
}
for (e = node->indirect_calls; e; e = next)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
remap_edge_params (inlined_edge, e);
if (es->predicate)
{
p = es->predicate->remap_after_inlining
(info, params_summary,
callee_info, operand_map, offset_map,
possible_truths, *toplev_predicate);
edge_set_predicate (e, &p);
}
else
edge_set_predicate (e, toplev_predicate);
}
}
/* Run remap_after_inlining on each predicate in V. */
static void
remap_freqcounting_predicate (class ipa_fn_summary *info,
class ipa_node_params *params_summary,
class ipa_fn_summary *callee_info,
vec<ipa_freqcounting_predicate, va_gc> *v,
vec<int> operand_map,
vec<HOST_WIDE_INT> offset_map,
clause_t possible_truths,
predicate *toplev_predicate)
{
ipa_freqcounting_predicate *fcp;
for (int i = 0; vec_safe_iterate (v, i, &fcp); i++)
{
predicate p
= fcp->predicate->remap_after_inlining (info, params_summary,
callee_info, operand_map,
offset_map, possible_truths,
*toplev_predicate);
if (p != false && p != true)
*fcp->predicate &= p;
}
}
/* We inlined EDGE. Update summary of the function we inlined into. */
void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
struct cgraph_node *to = (edge->caller->inlined_to
? edge->caller->inlined_to : edge->caller);
class ipa_fn_summary *info = ipa_fn_summaries->get (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
auto_vec<int, 8> operand_map;
auto_vec<HOST_WIDE_INT, 8> offset_map;
int i;
predicate toplev_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
class ipa_node_params *params_summary = (ipa_node_params_sum
? IPA_NODE_REF (to) : NULL);
if (es->predicate)
toplev_predicate = *es->predicate;
else
toplev_predicate = true;
info->fp_expressions |= callee_info->fp_expressions;
if (callee_info->conds)
{
ipa_auto_call_arg_values avals;
evaluate_properties_for_edge (edge, true, &clause, NULL, &avals, false);
}
if (ipa_node_params_sum && callee_info->conds)
{
class ipa_edge_args *args = IPA_EDGE_REF (edge);
int count = args ? ipa_get_cs_argument_count (args) : 0;
int i;
if (count)
{
operand_map.safe_grow_cleared (count, true);
offset_map.safe_grow_cleared (count, true);
}
for (i = 0; i < count; i++)
{
struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
int map = -1;
/* TODO: handle non-NOPs when merging. */
if (jfunc->type == IPA_JF_PASS_THROUGH)
{
if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
map = ipa_get_jf_pass_through_formal_id (jfunc);
if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
offset_map[i] = -1;
}
else if (jfunc->type == IPA_JF_ANCESTOR)
{
HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
if (offset >= 0 && offset < INT_MAX)
{
map = ipa_get_jf_ancestor_formal_id (jfunc);
if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
offset = -1;
offset_map[i] = offset;
}
}
operand_map[i] = map;
gcc_assert (map < ipa_get_param_count (params_summary));
}
}
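/* Frequency of the inlined call site; the callee's time entries are
scaled by it below. */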
sreal freq = edge->sreal_frequency ();
for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
{
predicate p;
p = e->exec_predicate.remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, clause,
toplev_predicate);
predicate nonconstp;
nonconstp = e->nonconst_predicate.remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, clause,
toplev_predicate);
if (p != false && nonconstp != false)
{
sreal add_time = ((sreal)e->time * freq);
int prob = e->nonconst_predicate.probability (callee_info->conds,
clause, es->param);
if (prob != REG_BR_PROB_BASE)
add_time = add_time * prob / REG_BR_PROB_BASE;
if (prob != REG_BR_PROB_BASE
&& dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\t\tScaling time by probability:%f\n",
(double) prob / REG_BR_PROB_BASE);
}
info->account_size_time (e->size, add_time, p, nonconstp);
}
}
remap_edge_summaries (edge, edge->callee, info, params_summary,
callee_info, operand_map,
offset_map, clause, &toplev_predicate);
remap_freqcounting_predicate (info, params_summary, callee_info,
info->loop_iterations, operand_map,
offset_map, clause, &toplev_predicate);
remap_freqcounting_predicate (info, params_summary, callee_info,
info->loop_strides, operand_map,
offset_map, clause, &toplev_predicate);
HOST_WIDE_INT stack_frame_offset = ipa_get_stack_frame_offset (edge->callee);
HOST_WIDE_INT peak = stack_frame_offset + callee_info->estimated_stack_size;
if (info->estimated_stack_size < peak)
info->estimated_stack_size = peak;
inline_update_callee_summaries (edge->callee, es->loop_depth);
if (info->call_size_time_table)
{
int edge_size = 0;
sreal edge_time = 0;
estimate_edge_size_and_time (edge, &edge_size, NULL, &edge_time, NULL, 0);
/* Unaccount size and time of the optimized out call. */
info->account_size_time (-edge_size, -edge_time,
es->predicate ? *es->predicate : true,
es->predicate ? *es->predicate : true,
true);
/* Account new calls. */
summarize_calls_size_and_time (edge->callee, info);
}
/* Free summaries that are not maintained for inline clones/edges. */
ipa_call_summaries->remove (edge);
ipa_fn_summaries->remove (edge->callee);
ipa_remove_from_growth_caches (edge);
}
/* For performance reasons ipa_merge_fn_summary_after_inlining does not
update the overall size and time; recompute them here.
If RESET is true, also recompute call_size_time_table. */
void
ipa_update_overall_fn_summary (struct cgraph_node *node, bool reset)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (node);
class ipa_size_summary *size_info = ipa_size_summaries->get (node);
size_time_entry *e;
int i;
size_info->size = 0;
info->time = 0;
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
size_info->size += e->size;
info->time += e->time;
}
info->min_size = (*info->size_time_table)[0].size;
if (reset)
vec_free (info->call_size_time_table);
if (node->callees || node->indirect_calls)
estimate_calls_size_and_time (node, &size_info->size, &info->min_size,
&info->time, NULL,
~(clause_t) (1 << predicate::false_condition),
NULL);
size_info->size = RDIV (size_info->size, ipa_fn_summary::size_scale);
info->min_size = RDIV (info->min_size, ipa_fn_summary::size_scale);
}
/* This function performs intraprocedural analysis in NODE that is required to
inline indirect calls. */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
ipa_analyze_node (node);
if (dump_file && (dump_flags & TDF_DETAILS))
{
ipa_print_node_params (dump_file, node);
ipa_print_node_jump_functions (dump_file, node);
}
}
/* Analyze the function body and compute its summary. */
void
inline_analyze_function (struct cgraph_node *node)
{
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
if (dump_file)
fprintf (dump_file, "\nAnalyzing function: %s\n", node->dump_name ());
if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
inline_indirect_intraprocedural_analysis (node);
compute_fn_summary (node, false);
if (!optimize)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
for (e = node->indirect_calls; e; e = e->next_callee)
e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
}
pop_cfun ();
}
/* Called when a new function is inserted into the callgraph late. */
void
ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
{
inline_analyze_function (node);
}
/* Compute function summaries for all functions in the compilation
unit. */
static void
ipa_fn_summary_generate (void)
{
struct cgraph_node *node;
FOR_EACH_DEFINED_FUNCTION (node)
if (DECL_STRUCT_FUNCTION (node->decl))
node->versionable = tree_versionable_function_p (node->decl);
ipa_fn_summary_alloc ();
ipa_fn_summaries->enable_insertion_hook ();
ipa_register_cgraph_hooks ();
FOR_EACH_DEFINED_FUNCTION (node)
if (!node->alias
&& (flag_generate_lto || flag_generate_offload || flag_wpa
|| opt_for_fn (node->decl, optimize)))
inline_analyze_function (node);
}
/* Read inline summary for edge E from IB. */
static void
read_ipa_call_summary (class lto_input_block *ib, struct cgraph_edge *e,
bool prevails)
{
class ipa_call_summary *es = prevails
? ipa_call_summaries->get_create (e) : NULL;
predicate p;
int length, i;
int size = streamer_read_uhwi (ib);
int time = streamer_read_uhwi (ib);
int depth = streamer_read_uhwi (ib);
if (es)
{
es->call_stmt_size = size;
es->call_stmt_time = time;
es->loop_depth = depth;
}
bitpack_d bp = streamer_read_bitpack (ib);
if (es)
es->is_return_callee_uncaptured = bp_unpack_value (&bp, 1);
else
bp_unpack_value (&bp, 1);
p.stream_in (ib);
if (es)
edge_set_predicate (e, &p);
length = streamer_read_uhwi (ib);
if (length && es && e->possibly_call_in_translation_unit_p ())
{
es->param.safe_grow_cleared (length, true);
for (i = 0; i < length; i++)
{
es->param[i].change_prob = streamer_read_uhwi (ib);
es->param[i].points_to_local_or_readonly_memory
= streamer_read_uhwi (ib);
}
}
else
{
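/* The summary is not needed; consume the streamed values anyway to
keep the input block in sync. */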
for (i = 0; i < length; i++)
{
streamer_read_uhwi (ib);
streamer_read_uhwi (ib);
}
}
}
/* Stream in inline summaries from the section. */
static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
size_t len)
{
const struct lto_function_header *header =
(const struct lto_function_header *) data;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
class data_in *data_in;
unsigned int i, count2, j;
unsigned int f_count;
lto_input_block ib ((const char *) data + main_offset, header->main_size,
file_data->mode_table);
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
header->string_size, vNULL);
f_count = streamer_read_uhwi (&ib);
for (i = 0; i < f_count; i++)
{
unsigned int index;
struct cgraph_node *node;
class ipa_fn_summary *info;
class ipa_node_params *params_summary;
class ipa_size_summary *size_info;
lto_symtab_encoder_t encoder;
struct bitpack_d bp;
struct cgraph_edge *e;
predicate p;
index = streamer_read_uhwi (&ib);
encoder = file_data->symtab_node_encoder;
node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
index));
info = node->prevailing_p () ? ipa_fn_summaries->get_create (node) : NULL;
params_summary = node->prevailing_p () ? IPA_NODE_REF (node) : NULL;
size_info = node->prevailing_p ()
? ipa_size_summaries->get_create (node) : NULL;
int stack_size = streamer_read_uhwi (&ib);
int size = streamer_read_uhwi (&ib);
sreal time = sreal::stream_in (&ib);
if (info)
{
info->estimated_stack_size
= size_info->estimated_self_stack_size = stack_size;
size_info->size = size_info->self_size = size;
info->time = time;
}
bp = streamer_read_bitpack (&ib);
if (info)
{
info->inlinable = bp_unpack_value (&bp, 1);
/* The write side streams an unused bit between the inlinable and
fp_expressions bits (see ipa_fn_summary_write); consume it so the
bitpack stays in sync. */
bp_unpack_value (&bp, 1);
info->fp_expressions = bp_unpack_value (&bp, 1);
}
else
{
bp_unpack_value (&bp, 1);
bp_unpack_value (&bp, 1);
bp_unpack_value (&bp, 1);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info || !info->conds);
if (info)
vec_safe_reserve_exact (info->conds, count2);
for (j = 0; j < count2; j++)
{
struct condition c;
unsigned int k, count3;
c.operand_num = streamer_read_uhwi (&ib);
c.code = (enum tree_code) streamer_read_uhwi (&ib);
c.type = stream_read_tree (&ib, data_in);
c.val = stream_read_tree (&ib, data_in);
bp = streamer_read_bitpack (&ib);
c.agg_contents = bp_unpack_value (&bp, 1);
c.by_ref = bp_unpack_value (&bp, 1);
if (c.agg_contents)
c.offset = streamer_read_uhwi (&ib);
count3 = streamer_read_uhwi (&ib);
c.param_ops = NULL;
if (info)
vec_safe_reserve_exact (c.param_ops, count3);
if (params_summary)
ipa_set_param_used_by_ipa_predicates
(params_summary, c.operand_num, true);
for (k = 0; k < count3; k++)
{
struct expr_eval_op op;
enum gimple_rhs_class rhs_class;
op.code = (enum tree_code) streamer_read_uhwi (&ib);
op.type = stream_read_tree (&ib, data_in);
switch (rhs_class = get_gimple_rhs_class (op.code))
{
case GIMPLE_UNARY_RHS:
op.index = 0;
op.val[0] = NULL_TREE;
op.val[1] = NULL_TREE;
break;
case GIMPLE_BINARY_RHS:
case GIMPLE_TERNARY_RHS:
bp = streamer_read_bitpack (&ib);
op.index = bp_unpack_value (&bp, 2);
op.val[0] = stream_read_tree (&ib, data_in);
if (rhs_class == GIMPLE_BINARY_RHS)
op.val[1] = NULL_TREE;
else
op.val[1] = stream_read_tree (&ib, data_in);
break;
default:
fatal_error (UNKNOWN_LOCATION,
"invalid fnsummary in LTO stream");
}
if (info)
c.param_ops->quick_push (op);
}
if (info)
info->conds->quick_push (c);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info || !info->size_time_table);
if (info && count2)
vec_safe_reserve_exact (info->size_time_table, count2);
for (j = 0; j < count2; j++)
{
class size_time_entry e;
e.size = streamer_read_uhwi (&ib);
e.time = sreal::stream_in (&ib);
e.exec_predicate.stream_in (&ib);
e.nonconst_predicate.stream_in (&ib);
if (info)
info->size_time_table->quick_push (e);
}
count2 = streamer_read_uhwi (&ib);
for (j = 0; j < count2; j++)
{
p.stream_in (&ib);
sreal fcp_freq = sreal::stream_in (&ib);
if (info)
{
ipa_freqcounting_predicate fcp;
fcp.predicate = NULL;
set_hint_predicate (&fcp.predicate, p);
fcp.freq = fcp_freq;
vec_safe_push (info->loop_iterations, fcp);
}
}
count2 = streamer_read_uhwi (&ib);
for (j = 0; j < count2; j++)
{
p.stream_in (&ib);
sreal fcp_freq = sreal::stream_in (&ib);
if (info)
{
ipa_freqcounting_predicate fcp;
fcp.predicate = NULL;
set_hint_predicate (&fcp.predicate, p);
fcp.freq = fcp_freq;
vec_safe_push (info->loop_strides, fcp);
}
}
for (e = node->callees; e; e = e->next_callee)
read_ipa_call_summary (&ib, e, info != NULL);
for (e = node->indirect_calls; e; e = e->next_callee)
read_ipa_call_summary (&ib, e, info != NULL);
}
lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data,
len);
lto_data_in_delete (data_in);
}
/* Read inline summary. Jump functions are shared among ipa-cp
and the inliner, so when ipa-cp is active, we don't need to read them
twice. */
static void
ipa_fn_summary_read (void)
{
struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
struct lto_file_decl_data *file_data;
unsigned int j = 0;
ipa_fn_summary_alloc ();
while ((file_data = file_data_vec[j++]))
{
size_t len;
const char *data
= lto_get_summary_section_data (file_data, LTO_section_ipa_fn_summary,
&len);
if (data)
inline_read_section (file_data, data, len);
else
/* Fatal error here. We do not want to support compiling ltrans units
with a different version of the compiler or different flags than
the WPA unit, so this should never happen. */
fatal_error (input_location,
"ipa inline summary is missing in input file");
}
ipa_register_cgraph_hooks ();
if (!flag_ipa_cp)
ipa_prop_read_jump_functions ();
gcc_assert (ipa_fn_summaries);
ipa_fn_summaries->enable_insertion_hook ();
}
/* Write inline summary for edge E to OB. */
static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
int i;
streamer_write_uhwi (ob, es->call_stmt_size);
streamer_write_uhwi (ob, es->call_stmt_time);
streamer_write_uhwi (ob, es->loop_depth);
bitpack_d bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, es->is_return_callee_uncaptured, 1);
streamer_write_bitpack (&bp);
if (es->predicate)
es->predicate->stream_out (ob);
else
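/* A lone zero encodes the absence of a predicate; predicate streams
are terminated by a zero clause. */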
streamer_write_uhwi (ob, 0);
streamer_write_uhwi (ob, es->param.length ());
for (i = 0; i < (int) es->param.length (); i++)
{
streamer_write_uhwi (ob, es->param[i].change_prob);
streamer_write_uhwi (ob, es->param[i].points_to_local_or_readonly_memory);
}
}
/* Write inline summaries for all nodes in SET.
Jump functions are shared among ipa-cp and the inliner, so when ipa-cp
is active, we don't need to write them twice. */
static void
ipa_fn_summary_write (void)
{
struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
lto_symtab_encoder_iterator lsei;
lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
unsigned int count = 0;
for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
lsei_next_function_in_partition (&lsei))
{
cgraph_node *cnode = lsei_cgraph_node (lsei);
if (cnode->definition && !cnode->alias)
count++;
}
streamer_write_uhwi (ob, count);
for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
lsei_next_function_in_partition (&lsei))
{
cgraph_node *cnode = lsei_cgraph_node (lsei);
if (cnode->definition && !cnode->alias)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
class ipa_size_summary *size_info = ipa_size_summaries->get (cnode);
struct bitpack_d bp;
struct cgraph_edge *edge;
int i;
size_time_entry *e;
struct condition *c;
streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
streamer_write_hwi (ob, size_info->estimated_self_stack_size);
streamer_write_hwi (ob, size_info->self_size);
info->time.stream_out (ob);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, info->inlinable, 1);
bp_pack_value (&bp, false, 1);
bp_pack_value (&bp, info->fp_expressions, 1);
streamer_write_bitpack (&bp);
streamer_write_uhwi (ob, vec_safe_length (info->conds));
for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
int j;
struct expr_eval_op *op;
streamer_write_uhwi (ob, c->operand_num);
streamer_write_uhwi (ob, c->code);
stream_write_tree (ob, c->type, true);
stream_write_tree (ob, c->val, true);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, c->agg_contents, 1);
bp_pack_value (&bp, c->by_ref, 1);
streamer_write_bitpack (&bp);
if (c->agg_contents)
streamer_write_uhwi (ob, c->offset);
streamer_write_uhwi (ob, vec_safe_length (c->param_ops));
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
streamer_write_uhwi (ob, op->code);
stream_write_tree (ob, op->type, true);
if (op->val[0])
{
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, op->index, 2);
streamer_write_bitpack (&bp);
stream_write_tree (ob, op->val[0], true);
if (op->val[1])
stream_write_tree (ob, op->val[1], true);
}
}
}
streamer_write_uhwi (ob, vec_safe_length (info->size_time_table));
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
streamer_write_uhwi (ob, e->size);
e->time.stream_out (ob);
e->exec_predicate.stream_out (ob);
e->nonconst_predicate.stream_out (ob);
}
ipa_freqcounting_predicate *fcp;
streamer_write_uhwi (ob, vec_safe_length (info->loop_iterations));
for (i = 0; vec_safe_iterate (info->loop_iterations, i, &fcp); i++)
{
fcp->predicate->stream_out (ob);
fcp->freq.stream_out (ob);
}
streamer_write_uhwi (ob, vec_safe_length (info->loop_strides));
for (i = 0; vec_safe_iterate (info->loop_strides, i, &fcp); i++)
{
fcp->predicate->stream_out (ob);
fcp->freq.stream_out (ob);
}
for (edge = cnode->callees; edge; edge = edge->next_callee)
write_ipa_call_summary (ob, edge);
for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
write_ipa_call_summary (ob, edge);
}
}
streamer_write_char_stream (ob->main_stream, 0);
produce_asm (ob, NULL);
destroy_output_block (ob);
if (!flag_ipa_cp)
ipa_prop_write_jump_functions ();
}
/* Release function summary. */
void
ipa_free_fn_summary (void)
{
if (!ipa_call_summaries)
return;
ggc_delete (ipa_fn_summaries);
ipa_fn_summaries = NULL;
delete ipa_call_summaries;
ipa_call_summaries = NULL;
edge_predicate_pool.release ();
/* During IPA this is one of the largest data structures to release. */
if (flag_wpa)
ggc_trim ();
}
/* Release size summary. */
void
ipa_free_size_summary (void)
{
if (!ipa_size_summaries)
return;
delete ipa_size_summaries;
ipa_size_summaries = NULL;
}
namespace {
const pass_data pass_data_local_fn_summary =
{
GIMPLE_PASS, /* type */
"local-fnsummary", /* name */
OPTGROUP_INLINE, /* optinfo_flags */
TV_INLINE_PARAMETERS, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_local_fn_summary : public gimple_opt_pass
{
public:
pass_local_fn_summary (gcc::context *ctxt)
: gimple_opt_pass (pass_data_local_fn_summary, ctxt)
{}
/* opt_pass methods: */
opt_pass * clone () { return new pass_local_fn_summary (m_ctxt); }
virtual unsigned int execute (function *)
{
return compute_fn_summary_for_current ();
}
}; // class pass_local_fn_summary
} // anon namespace
gimple_opt_pass *
make_pass_local_fn_summary (gcc::context *ctxt)
{
return new pass_local_fn_summary (ctxt);
}
/* Free inline summary. */
namespace {
const pass_data pass_data_ipa_free_fn_summary =
{
SIMPLE_IPA_PASS, /* type */
"free-fnsummary", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_IPA_FREE_INLINE_SUMMARY, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_ipa_free_fn_summary : public simple_ipa_opt_pass
{
public:
pass_ipa_free_fn_summary (gcc::context *ctxt)
: simple_ipa_opt_pass (pass_data_ipa_free_fn_summary, ctxt),
small_p (false)
{}
/* opt_pass methods: */
opt_pass *clone () { return new pass_ipa_free_fn_summary (m_ctxt); }
void set_pass_param (unsigned int n, bool param)
{
gcc_assert (n == 0);
small_p = param;
}
virtual bool gate (function *) { return true; }
virtual unsigned int execute (function *)
{
ipa_free_fn_summary ();
/* Free ipa-prop structures if they are no longer needed. */
ipa_free_all_structures_after_iinln ();
if (!flag_wpa)
ipa_free_size_summary ();
return 0;
}
private:
bool small_p;
}; // class pass_ipa_free_fn_summary
} // anon namespace
simple_ipa_opt_pass *
make_pass_ipa_free_fn_summary (gcc::context *ctxt)
{
return new pass_ipa_free_fn_summary (ctxt);
}
namespace {
const pass_data pass_data_ipa_fn_summary =
{
IPA_PASS, /* type */
"fnsummary", /* name */
OPTGROUP_INLINE, /* optinfo_flags */
TV_IPA_FNSUMMARY, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
( TODO_dump_symtab ), /* todo_flags_finish */
};
class pass_ipa_fn_summary : public ipa_opt_pass_d
{
public:
pass_ipa_fn_summary (gcc::context *ctxt)
: ipa_opt_pass_d (pass_data_ipa_fn_summary, ctxt,
ipa_fn_summary_generate, /* generate_summary */
ipa_fn_summary_write, /* write_summary */
ipa_fn_summary_read, /* read_summary */
NULL, /* write_optimization_summary */
NULL, /* read_optimization_summary */
NULL, /* stmt_fixup */
0, /* function_transform_todo_flags_start */
NULL, /* function_transform */
NULL) /* variable_transform */
{}
/* opt_pass methods: */
virtual unsigned int execute (function *) { return 0; }
}; // class pass_ipa_fn_summary
} // anon namespace
ipa_opt_pass_d *
make_pass_ipa_fn_summary (gcc::context *ctxt)
{
return new pass_ipa_fn_summary (ctxt);
}
/* Reset all state within ipa-fnsummary.c so that we can rerun the compiler
within the same process. For use by toplev::finalize. */
void
ipa_fnsummary_c_finalize (void)
{
ipa_free_fn_summary ();
}
|
kernel.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
} ;
int Kernel(const float dt,
const float h_x,
const float h_y,
struct dataobj *restrict u_vec,
const int time_M,
const int time_m,
struct profiler * timers,
const int x_M,
const int x_m,
const int xi_ltkn,
const int xi_rtkn,
const int y_M,
const int y_m,
const int yi_ltkn,
const int yi_rtkn){
float (*restrict u)[u_vec->size[1]][u_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]]) u_vec->data;
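/* Time loop with two-level double buffering: the parity of TIME selects
which plane is read (t0) and which is written (t1). */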
for (int time = time_m, t0 = (time)%(2), t1 = (time + 1)%(2); time <= time_M; time += 1, t0 = (time)%(2), t1 = (time + 1)%(2)){
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
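/* Each output point reads only the t0 plane, so the xi rows are
independent: they are split across threads, and the inner yi loop is
vectorized. */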
#pragma omp parallel for num_threads(4) schedule(static, 1)
for (int xi = x_m + xi_ltkn; xi <= x_M - xi_rtkn; xi += 1){
#pragma omp simd aligned(u:32)
for (int yi = y_m + yi_ltkn; yi <= y_M - yi_rtkn; yi += 1){
u[t1][xi + 1][yi + 1] = 1.0F*(dt*h_x*u[t0][xi + 1][yi] - dt*h_x*u[t0][xi + 1][yi + 1]
+ dt*h_y*u[t0][xi][yi + 1] - dt*h_y*u[t0][xi + 1][yi + 1]
+ h_x*h_y*u[t0][xi + 1][yi + 1])/(h_x*h_y);
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
}
return 0;
}
|
4-16t.c | #include <stdio.h>
#include <omp.h>
int main()
{
int i;
omp_set_num_threads(16);
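/* The parallel for below distributes the 16 iterations across the 16
requested threads. */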
#pragma omp parallel for
for (i=0; i<16; i++)
{
printf("Hello from thread number: %d Iteration: %d \n",
omp_get_thread_num(), i);
}
printf("\n GoodBye – Team Destroyed – Exiting Program \n\n");
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
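// Illustrative (assumed) use of the map above; the single-element cache
// makes the common pattern of repeated lookups for the same FileID cheap.
// FID and Loc are hypothetical:
//
//   FileNullability &FN = NullabilityMap[FID]; // cached after first access
//   if (!FN.SawTypeNullability)
//     FN.PointerLoc = Loc;                     // first unannotated pointer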
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical wrt to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_last = kind_pointer
};
public:
SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(const StringRef &MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(StringRef KernelName, QualType KernelNameType,
StringRef KernelStableName);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_std_layout, denotes the parameter size in bytes
// (including padding for structs).
// If Kind is kind_accessor, denotes the access target; possible access
// targets are defined in access/access.hpp.
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// Kernel invocation descriptor
struct KernelDesc {
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
KernelDesc() = default;
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
/// Emits a forward declaration for given declaration.
void emitFwdDecl(raw_ostream &O, const Decl *D);
/// Emits forward declarations of classes and template classes on which
/// declaration of given type depends. See example in the comments for the
/// implementation.
/// \param O
/// stream to emit to
/// \param T
/// type to emit forward declarations for
/// \param Emitted
/// a set of declarations for which forward declarations have already
/// been emitted
void emitForwardClassDecls(raw_ostream &O, QualType T,
llvm::SmallPtrSetImpl<const void*> &Emitted);
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
/// Used for emitting diagnostics.
DiagnosticsEngine &Diag;
/// Whether header is generated with unnamed lambda support
bool UnnamedLambdaSupport;
};
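// Illustrative (assumed) call sequence for the interface above, as a device
// compiler might drive it; all names and argument values are hypothetical:
//
//   SYCLIntegrationHeader H(Diags, /*UnnamedLambdaSupport=*/false);
//   H.startKernel("my_kernel", KernelNameTy, "my_kernel_stable");
//   H.addParamDesc(SYCLIntegrationHeader::kind_std_layout,
//                  /*Info=*/4, /*Offset=*/0);
//   H.endKernel();
//   H.emit(Out); // Out is a raw_ostream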
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: because it
/// stores a function_ref, they must make sure that all calls to get() with
/// the same location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
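// Illustrative (assumed) usage of PreferredTypeBuilder: the parser records
// the expected type at a token before descending into an expression, and
// code completion later queries it at that same location:
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterReturn(S, Tok.getLocation());
//   // ... parse the returned expression ...
//   QualType Expected = PreferredType.get(Tok.getLocation());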
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, together with the locations of
/// delete-expressions for which we could not prove whether they mismatch
/// the new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
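// Illustrative (assumed) usage of DelayedDiagnostics: collect diagnostics
// into a pool while parsing a declaration, then drop (or emit) them once
// the declaration's fate is known:
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; eligible diagnostics accumulate in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);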
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
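// Illustrative (assumed) usage of ContextRAII: temporarily make a different
// DeclContext current for the duration of a scope; NewDC is hypothetical:
//
//   {
//     Sema::ContextRAII SavedContext(S, NewDC);
//     // ... S.CurContext is NewDC here ...
//   } // previous context and delayed-diagnostic state restored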
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
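// Illustrative (assumed) usage of SynthesizedFunctionScope when synthesizing
// a function body; DC and UseLoc are hypothetical:
//
//   SynthesizedFunctionScope Scope(S, DC);
//   Scope.addContextNote(UseLoc);
//   // ... build the body; scopes and contexts unwind in the destructor ...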
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields (e.g. the SIZE operator in MS-style inline assembly).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
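// For orientation, some illustrative (assumed) examples of where these
// contexts arise:
//   operand of sizeof or decltype          -> Unevaluated
//   case label or array-bound expression   -> ConstantEvaluated
//   discarded branch of an `if constexpr`  -> DiscardedStatement
//   ordinary expression statement          -> PotentiallyEvaluated
//   default function argument              -> PotentiallyEvaluatedIfUsed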
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the location of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
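// Illustrative (assumed) usage; instantiateDeeply is hypothetical:
//
//   S.runWithSufficientStackSpace(Loc, [&] { instantiateDeeply(); });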
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
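// Illustrative (assumed) usage; the diagnostic ID and arguments are
// hypothetical:
//
//   S.Diag(Loc, diag::err_example) << TheDecl << TheType;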
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Stream each tuple element into the builder in order; the braced array
// initializer guarantees left-to-right evaluation of the '<<' applications.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom-up as
/// they are parsed, so a noderef pointer may appear to be accessed before we
/// can tell whether the access is legitimate. For example, in `&*p` where
/// `p` is a noderef pointer, we first parse the `*p` and must then see the
/// enclosing address-of before accepting it. This requires keeping a
/// container of all pending expressions and checking whether their addresses
/// are eventually taken (see the illustrative example after these
/// declarations).
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
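// Illustrative source-level example (not from the original header) of what
// the noderef bookkeeping above handles:
//
//   int __attribute__((noderef)) *P;
//   int *Q = &*P; // OK: the dereference is immediately re-taken by '&'
//   int X = *P;   // diagnosed: loads through a noderef pointer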
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
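// Typical use (an illustrative sketch; the diagnostic ID is only an example
// of an existing incomplete-type diagnostic):
//
//   if (RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
//     return true; // T was incomplete and has already been diagnosed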
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
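// Example (illustrative): for 'decltype(f())' the operand 'f()' is
// unevaluated, so callers use the default AsUnevaluated == true; when
// deducing 'decltype(auto)' from an initializer that has already been
// evaluated, callers pass AsUnevaluated == false, per the comment above.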
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
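// Illustrative example (assumes MSVC-compatibility mode) of the recovery
// this performs:
//
//   template <typename T> struct Derived : T {
//     TypeFromBase Member; // lookup fails here; a DependentTypeName is
//                          // formed and lookup is retried at instantiation
//   };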
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
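// Parser-side usage sketch (illustrative; 'Actions', 'Tok', 'CCC' and
// 'ParseWithType' are placeholders, not names from this header):
//
//   Sema::NameClassification NC =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Tok, &CCC);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:
//     return ParseWithType(NC.getType()); // hypothetical helper
//   case Sema::NC_NonType:
//     return Actions.ActOnNameClassifiedAsNonType(
//         getCurScope(), SS, NC.getNonTypeDecl(), NameLoc, Tok);
//   // ... remaining NameClassificationKind cases ...
//   }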
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
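// Illustrative example (not from the original header) of the case these two
// hooks cover:
//
//   int foo;
//   foo<int>(0); // 'foo' was plausibly intended to be a template-name;
//                // diagnoseExprIntendedAsTemplateName reports on '<'..'>'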
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
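// Illustrative example (assumes Objective-C ARC, where ownership-qualified
// fields make a C union non-trivial):
//
//   union U { __strong id Obj; int I; }; // non-trivial to copy/initialize
//   void f(union U Arg);                 // NTCUC_FunctionParam: diagnosed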
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
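// Illustrative example: the body of 'f' below can be neither delayed nor
// skipped, because evaluating 'f()' is required in the middle of parsing:
//
//   constexpr int f() { return 3; }
//   int Arr[f()]; // array bound forces evaluation of f's body mid-parse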
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
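// Illustrative reading of getDiagnosticIndex (an observation on the code
// above, not new API): a special member yields its CXXSpecialMember value
// directly (Comparison is None == 0), while a defaulted comparison yields
// CXXInvalid + (unsigned)Comparison, so the two index ranges never collide.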
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject it with an error in
/// case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope; otherwise returns null.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as plain
/// integer values. This enumeration just names the priority weights used to
/// calculate that final value (a worked example follows the enumeration).
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
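// Worked example (derived from the rules above): an attribute inferred from
// another platform inside a '#pragma clang attribute' region has final
// priority AP_InferredFromOtherPlatform + AP_PragmaClangAttribute == 3; an
// explicit attribute written on the declaration (AP_Explicit == 0) therefore
// both removes it when applied and is never blocked by it.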
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - Used by all the assignment diagnostic functions to
// indicate what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
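// Illustrative examples of the three results when declaring 'void f(int)':
//
//   void f(double); // existing: Ovl_Overload (different signature)
//   void f(int);    // existing: Ovl_Match (identical signature)
//   int f;          // existing: Ovl_NonFunction (lookup finds a non-function)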
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
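// Typical use (an illustrative sketch; 'CaseExpr' and 'CondType' are
// placeholders):
//
//   llvm::APSInt Val;
//   ExprResult Converted = CheckConvertedConstantExpression(
//       CaseExpr, CondType, Val, CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return StmtError();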
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as the
// diagnostics logic relies on this ordering.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
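// A minimal usage sketch of unqualified lookup via the entry points in this
// group (illustrative only, not taken from the surrounding code; 'SemaRef',
// 'S', 'Name', and 'Loc' are assumed to be a Sema instance, the current
// Scope, a DeclarationName, and a SourceLocation at the call site):
//
//   LookupResult R(SemaRef, Name, Loc, Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, S) && R.isSingleResult()) {
//     NamedDecl *ND = R.getFoundDecl();
//     // ... exactly one unambiguous declaration was found ...
//   }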
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
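// For reference, the C++ literal-operator forms these lookup results
// correspond to (an illustrative sketch using a hypothetical suffix '_k';
// the string-template form is a proposed extension that Clang supports):
//
//   LOLR_Cooked:         unsigned long long operator""_k(unsigned long long);
//   LOLR_Raw:            int operator""_k(const char *);  // gets the spelling
//   LOLR_Template:       template <char...> int operator""_k();
//   LOLR_StringTemplate: template <typename CharT, CharT...> int operator""_k();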
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of function emission, based on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
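// An illustrative call of the Filter overload above (hypothetical; 'SemaRef'
// and 'E' are assumed to be in scope). The filter rejects rebuilt expressions
// that are not calls, so other combinations of typo corrections keep being
// tried until one produces a call or all are exhausted:
//
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         return isa<CallExpr>(Candidate) ? ExprResult(Candidate)
//                                         : ExprError();
//       });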
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) when an atomic property has one, but not the other, of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
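// Illustrative use of the RAII helper above (hypothetical; 'SemaRef' is an
// in-scope Sema instance). The compound-statement scope is entered on
// construction and finished automatically at the closing brace:
//
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     // ... act on the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs here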
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
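// A matching sketch for FunctionScopeRAII (hypothetical; 'BuildBody' is an
// invented placeholder). disable() lets a success path hand the function
// scope off instead of popping it in the destructor:
//
//   Sema::FunctionScopeRAII FuncScope(SemaRef);
//   StmtResult Body = BuildBody();
//   if (!Body.isInvalid())
//     FuncScope.disable(); // a later step will pop the scope instead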
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
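// Illustrative pairing of the push/pop entry points above (a sketch; callers
// typically use an RAII wrapper rather than calling the pair directly):
//
//   SemaRef.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   // ... build the unevaluated operand ...
//   SemaRef.PopExpressionEvaluationContext();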
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
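// A minimal sketch of the convenience overload above (hypothetical 'Var' and
// 'Loc'); note the inverted sense of the return value:
//
//   if (SemaRef.tryCaptureVariable(Var, Loc))
//     return ExprError(); // capture failed; a diagnostic was already emitted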
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation OpLoc,
TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation OpLoc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, Expr *Operand);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
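// For reference, a C11 generic selection of the kind these callbacks
// process (an illustrative source-level example, not part of this API):
//   #define cbrt(X) _Generic((X), long double: cbrtl, \
//                                 float: cbrtf,       \
//                                 default: cbrt)(X)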
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
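// Illustrative OpenMP array-section syntax handled here, written as
// base[lower-bound : length]:
//   #pragma omp target map(to: a[2:8])   // maps elements a[2]..a[9]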
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
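// This exec-config callback corresponds to the CUDA kernel-launch syntax
// "kern<<<grid, block>>>(args)"; LLLLoc and GGGLoc are the locations of
// the '<<<' and '>>>' tokens.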
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
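// Illustrative GNU conditional with an omitted middle operand: "p ?: q"
// behaves like "p ? p : q", except that p is evaluated only once.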
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
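// Illustrative use of the extension, together with GNU computed goto:
//   void *target = &&done; goto *target; done: ;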
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
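// Illustrative GNU statement expression; the value of the last statement
// becomes the value of the whole expression:
//   int x = ({ int t = f(); t + 1; });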
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
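// Illustrative use of the builtin form, given a va_list 'ap' that has
// already been va_start'ed:
//   int n = __builtin_va_arg(ap, int);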
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
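// Illustrative use: as a default argument, the builtin is evaluated at the
// call site, capturing the caller's location:
//   void log_event(int line = __builtin_LINE());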
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
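// Illustrative Microsoft extension these handle; the guarded block is kept
// only when lookup of the name succeeds:
//   template <typename T> void call(T &t) {
//     __if_exists(T::f) { t.f(); }
//   }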
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
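// Illustrative block literal and call (a sketch of the construct, not of
// this API):
//   int (^square)(int) = ^(int x) { return x * x; };
//   int nine = square(3);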
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
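// Illustrative element-wise vector conversion (both vector types must have
// the same number of elements):
//   typedef int   int4   __attribute__((ext_vector_type(4)));
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 f = __builtin_convertvector(iv, float4);  // iv is an int4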
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
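// Illustrative OpenCL reinterpreting conversion; the source and destination
// types must have the same size:
//   float f = __builtin_astype(u, float);  // u is a uint; bits unchanged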
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions) specification.
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast
/// expressions.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
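// Illustrative C++17 fold-expressions of the kind handled above:
//   template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }
//   template <typename... Ts> bool all(Ts... ts) { return (... && ts); }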
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
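// Illustrative pseudo-destructor call on a scalar type; it has no effect,
// but lets generic code destroy objects uniformly:
//   typedef int I;
//   void destroy(int *p) { p->~I(); }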
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying any
/// implicit conversions (such as an lvalue-to-rvalue conversion) when the
/// initializer is not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
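// Illustrative C++14 init-captures analyzed here:
//   auto f = [x = 42, p = std::move(ptr)] { return x + *p; };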
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed for the conversion, and IR
/// generation actually generates the real body of the function pointer
/// conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(TemplateDecl *Template,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
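/// For illustration only (hypothetical user code), a constrained template
/// whose associated constraints this machinery checks:
/// \code
/// template<typename T> concept Small = sizeof(T) <= sizeof(int); // illustrative
/// template<Small T> struct Box {};
/// Box<char> ok;         // constraints satisfied
/// Box<long double> bad; // unsatisfied on typical targets; diagnosed below
/// \endcode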
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
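/// For illustration only (hypothetical user code), the declaration these
/// entry points act on:
/// \code
/// static_assert(sizeof(int) >= 2, "int is too small"); // illustrative
/// \endcode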
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
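/// For illustration only (hypothetical user code), a covariant return type
/// that this check accepts:
/// \code
/// struct B { virtual B *clone(); };      // illustrative example
/// struct D : B { D *clone() override; }; // covariant return type: OK
/// \endcode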
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
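/// For illustration only (hypothetical user code), the violation this
/// detects:
/// \code
/// struct B { virtual void f() final; }; // illustrative example
/// struct D : B { void f() override; };  // error: overrides a 'final' function
/// \endcode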
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
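/// For illustration only (hypothetical user code), uses of an abstract type
/// that RequireNonAbstractType rejects:
/// \code
/// struct Shape { virtual void draw() = 0; }; // illustrative example
/// Shape make();      // error: abstract return type (AbstractReturnType)
/// void paint(Shape); // error: abstract parameter type (AbstractParamType)
/// \endcode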
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
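/// For illustration only (hypothetical user code), the name 'Range' in the
/// second declaration below is a deduction-guide name:
/// \code
/// template<typename It> struct Range { It first, last; }; // illustrative
/// template<typename It> Range(It, It) -> Range<It>;
/// \endcode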
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
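/// For illustration only (hypothetical user code), a template argument
/// deduced from an array bound (CTAK_DeducedFromArrayBound):
/// \code
/// template<typename T, int N> void first(T (&arr)[N]); // illustrative
/// void test() {
///   int a[4];
///   first(a); // N is deduced as 4 from the array bound
/// }
/// \endcode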
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
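/// For illustration only (hypothetical user code), the construct handled
/// here:
/// \code
/// template<typename T> void f() {
///   using U = typename T::type; // illustrative: dependent name used as a type
/// }
/// \endcode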
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
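/// For illustration only (hypothetical user code), the error this diagnoses:
/// \code
/// template<typename ...Ts> void g(Ts...); // illustrative example
/// template<typename ...Ts> void f(Ts ...ts) {
///   g(ts);    // error: unexpanded parameter pack 'ts' (UPPC_Expression)
///   g(ts...); // OK: the ellipsis expands the pack
/// }
/// \endcode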
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
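/// For illustration only (hypothetical user code), a call whose deduction
/// fails with TDK_Inconsistent:
/// \code
/// template<typename T> void f(T, T); // illustrative example
/// void test() {
///   f(1, 2.0); // T deduced as both 'int' and 'double'
/// }
/// \endcode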
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
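/// For illustration only (hypothetical user code), the outcomes these
/// entry points report:
/// \code
/// auto x = 42;       // DAR_Succeeded: x has type 'int'
/// auto y = {1, 2.0}; // DAR_Failed: inconsistent elements in the braced list
/// \endcode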
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
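// A small usage sketch, assuming a Sema &S and a pack-element index I:
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     // Substitutions performed here use the I'th element of each
//     // expanded parameter pack.
//   } // The previous substitution index is restored here.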
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
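// A sketch of the common pattern, assuming a Sema &SemaRef, a point of
// instantiation POI, and the Decl *Entity being instantiated:
//
//   Sema::InstantiatingTemplate Inst(SemaRef, POI, Entity);
//   if (Inst.isInvalid())
//     return; // Depth limit exceeded; an error was already produced.
//   if (Inst.isAlreadyInstantiating())
//     return; // This specialization is already on the active stack.
//   // ... perform the instantiation; the destructor pops the context.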
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
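// A hedged sketch of manual context management, assuming a Sema &S, a
// SourceLocation Loc, and a Decl *D; most callers should prefer the
// InstantiatingTemplate RAII wrapper above, which pairs these calls
// automatically:
//
//   Sema::CodeSynthesisContext Ctx;
//   Ctx.Kind = Sema::CodeSynthesisContext::TemplateInstantiation;
//   Ctx.PointOfInstantiation = Loc;
//   Ctx.Entity = D;
//   S.pushCodeSynthesisContext(Ctx);
//   // ... synthesize the entity ...
//   S.popCodeSynthesisContext();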
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
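// Usage sketch for the query above, assuming a Sema &S:
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = S.isSFINAEContext()) {
//     // We are in a SFINAE context; *Info, when non-null, is the nearest
//     // deduction context that can capture suppressed diagnostics.
//   }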
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
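// Typical use, assuming a Sema &SemaRef: run a speculative check with errors
// trapped, then consult the trap instead of emitting diagnostics:
//
//   Sema::SFINAETrap Trap(SemaRef);
//   // ... perform the substitution or deduction that may fail ...
//   bool Failed = Trap.hasErrorOccurred();
//   // The destructor restores the previous SFINAE error state.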
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
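// Usage sketch, assuming a Sema &SemaRef:
//
//   {
//     Sema::TentativeAnalysisScope Tentative(SemaRef);
//     // ... provisional analysis; typo-correction is disabled and
//     // immediate-context diagnostics are suppressed ...
//   } // The previous typo-correction setting is restored here.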
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
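// A sketch of the intended shape, assuming a Sema &S:
//
//   {
//     Sema::GlobalEagerInstantiationScope GlobalInstantiations(
//         S, /*Enabled=*/true);
//     // ... analysis that may queue vtable uses and implicit
//     // instantiations ...
//     GlobalInstantiations.perform(); // Define vtables, flush the queue.
//   } // The saved queues are swapped back by the destructor.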
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
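// The local counterpart follows the same save/perform/restore shape,
// assuming a Sema &S:
//
//   {
//     Sema::LocalEagerInstantiationScope LocalInstantiations(S);
//     // ... members of local classes queue their instantiations here ...
//     LocalInstantiations.perform(); // Runs the local-only queue.
//   }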
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
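// A small usage sketch, assuming an existing FunctionProtoType *Proto whose
// NumParams parameter infos are being propagated:
//
//   Sema::ExtParameterInfoBuilder ParamInfos;
//   for (unsigned I = 0; I != NumParams; ++I)
//     ParamInfos.set(I, Proto->getExtParameterInfo(I));
//   // Null if every recorded info was trivial:
//   const FunctionProtoType::ExtParameterInfo *Array =
//       ParamInfos.getPointerOrNull(NumParams);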
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
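// A dispatch sketch, assuming a Sema &S plus a Scope *Sc, an
// IdentifierInfo *Name, and a SourceLocation NameLoc from the parser:
//
//   ParsedType ReceiverType;
//   switch (S.getObjCMessageKind(Sc, Name, NameLoc, /*IsSuper=*/false,
//                                /*HasTrailingDot=*/false, ReceiverType)) {
//   case Sema::ObjCSuperMessage:
//     break; // Send to 'super'.
//   case Sema::ObjCInstanceMessage:
//     break; // The receiver is an expression.
//   case Sema::ObjCClassMessage:
//     break; // ReceiverType names the receiving class.
//   }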
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
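// Usage sketch, assuming a Sema &S:
//
//   if (S.getOptimizeOffPragmaLocation().isValid()) {
//     // A "#pragma clang optimize off" region is currently active.
//   }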
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
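// A sketch of the intended pairing, assuming a Sema &S handling an OpenCL
// extension pragma region, with a QualType T and a Decl *FD declared inside
// it ("cl_khr_fp64" stands in for whatever extension the pragma named):
//
//   S.setCurrentOpenCLExtension("cl_khr_fp64");
//   S.setCurrentOpenCLExtensionForType(T);
//   S.setCurrentOpenCLExtensionForDecl(FD);
//   // When the pragma region ends:
//   S.setCurrentOpenCLExtension("");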
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns the OpenMP nesting level for the current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets the OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map
/// etc.) for \p FD, based on the DSA of the corresponding captured
/// declaration \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
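/// For example (illustrative), this hook is reached for a directive such as:
/// \code
///   int counter;
///   #pragma omp threadprivate(counter)
/// \endcode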
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
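/// As an illustrative example ('mymax' is an arbitrary identifier), the
/// Start/Combiner/Initializer/End hooks above are invoked while parsing:
/// \code
///   #pragma omp declare reduction(mymax : int :                        \
///       omp_out = omp_in > omp_out ? omp_in : omp_out)                 \
///       initializer(omp_priv = INT_MIN)
/// \endcode
/// where the omp_in/omp_out expression is the combiner and the omp_priv
/// expression is the initializer.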
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
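/// Illustrative example of a mapper declaration handled by the hooks above
/// (type and mapper names are arbitrary):
/// \code
///   struct Vec { int len; double *data; };
///   #pragma omp declare mapper(vmap : struct Vec v) map(v, v.data[0:v.len])
/// \endcode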
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for the OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
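/// For example (illustrative), DeclareTargetNestingLevel is positive while
/// parsing the declarations inside:
/// \code
///   #pragma omp declare target
///   int dev_data;
///   void dev_fn();
///   #pragma omp end declare target
/// \endcode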
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of the captured region for an OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
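/// Illustrative usage handled by this hook (names are arbitrary):
/// \code
///   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1)
///   float add1(float *a, int i);
/// \endcode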
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, a pair of the original function and the
/// variant reference expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
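/// Illustrative usage checked by the two declarations above (function names
/// are arbitrary):
/// \code
///   void base_gpu(void);
///   #pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
///   void base(void);
/// \endcode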
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
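/// For example (illustrative), a schedule clause may carry a modifier, a
/// kind, and a chunk size:
/// \code
///   #pragma omp for schedule(monotonic : dynamic, 4)
/// \endcode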
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
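/// Illustrative map clause forms covered by this hook ('always' is a
/// map-type modifier; 'tofrom' and 'to' are map types):
/// \code
///   #pragma omp target map(tofrom : a[0:n]) map(always, to : b[0:n])
/// \endcode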
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
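/// For illustration (a sketch), the conversion kinds correspond to source
/// forms such as:
/// \code
///   double d = 0;
///   int a = d;                   // CCK_ImplicitConversion
///   int b = (int)d;              // CCK_CStyleCast
///   int c = int(d);              // CCK_FunctionalCast
///   int e = static_cast<int>(d); // CCK_OtherCast
/// \endcode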
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
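/// For example (illustrative; the exact classification depends on the
/// language mode), an int is always a valid variadic argument, while a
/// class type with a non-trivial copy constructor is not:
/// \code
///   printf("%d", 42);               // VAK_Valid
///   printf("%s", std::string("x")); // diagnosed: non-POD variadic argument
/// \endcode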
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers of different signs but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
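/// For illustration (a sketch), some C assignments and the classification
/// they receive:
/// \code
///   int *p; long l; unsigned *u; const char **cpp; char **cp;
///   p = l;    // IntToPointer (accepted as an extension)
///   l = p;    // PointerToInt (accepted as an extension)
///   u = p;    // IncompatiblePointerSign
///   cpp = cp; // IncompatibleNestedPointerQualifiers
/// \endcode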
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
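/// For illustration (a sketch):
/// \code
///   struct B {}; struct D : B {};
///   D d;
///   const B &rb = d; // Ref_Compatible (binds via the base class)
///   long l;
///   int &ri = l;     // Ref_Incompatible: direct binding is not possible
/// \endcode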
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
bool &DerivedToBase, bool &ObjCConversion,
bool &ObjCLifetimeConversion,
bool &FunctionConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
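// A minimal usage sketch (hypothetical caller, not part of this header):
// getKnownValue() is only populated for conditions evaluated as constants,
// e.g. the condition of an 'if constexpr'.
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       S.getCurScope(), Loc, CondExpr, Sema::ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // *Known is the compile-time truth value of the condition.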
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the checked condition expression; an invalid result on error.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
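// Because the enumerators above are ordered from worst to best, preference
// values can be compared directly. A hedged sketch (hypothetical candidate
// pruning, not code from this header):
//
//   if (S.IdentifyCUDAPreference(Caller, F1) >
//       S.IdentifyCUDAPreference(Caller, F2))
//     ; // F1 is the more suitable candidate for this caller.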
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs the address of \p Callee;
/// may be nullptr in the case of a global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In
/// case of error, emits an appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with
/// the parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGABuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
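// A minimal registration sketch (the kind name and magic value below are
// hypothetical): associate magic value 42, under the argument kind "my_tag",
// with the type 'int', so that calls through functions carrying the
// corresponding argument_with_type_tag attribute can be checked:
//
//   S.RegisterTypeTagForDatatype(&S.Context.Idents.get("my_tag"),
//                                /*MagicValue=*/42, S.Context.IntTy,
//                                /*LayoutCompatible=*/false,
//                                /*MustBeNull=*/false);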
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
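// Worked example: completing 'f(a, b, <cursor>' against 'void f(int, int)'
// gives NumParams == 2, NumArgs == 2, and PartialOverloading == true; the
// pending argument after the comma counts as an extra one, so the check is
// 2 + 1 > 2 and TooManyArguments returns true.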
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
SmallVector<Decl*, 4> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.push_back(d); }
SmallVectorImpl<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns the SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda);
return *SyclIntHeader;
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelHavePolymorphicClass,
KernelCallDllimportFunction,
KernelCallVariadicFunction
};
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice(void);
bool CheckSYCLCall(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
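// A hedged usage sketch (hypothetical caller): enter an unevaluated context
// for the duration of one scope, e.g. while analyzing an operand that must
// not be odr-used:
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze the operand here; the context is popped automatically
//     // when 'Unevaluated' goes out of scope ...
//   }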
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
template-for-new-benchmark.c | /**
* template.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "../polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is N=1024. */
#include "template-for-new-benchmark.h"
/* Array initialization. */
static
void init_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
int i, j;
/* Parallelize only the outer loop; j must be private here, or it would be
shared across threads and race. */
#pragma omp parallel for private(j)
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
C[i][j] = 42;
}
/* DCE code. Must scan the entire live-out data.
Can also be used to check the correctness of the output. */
static
void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
int i, j;
/* Printing must stay sequential: parallelizing these loops would
interleave the output nondeterministically and break correctness
checking. */
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);
if ((i * n + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_template(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
int i, j;
/* Parallelize only the outer loop; j must be private to avoid a race. */
#pragma omp parallel for private(j)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_N; j++)
C[i][j] += 42;
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(C));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_template (n, POLYBENCH_ARRAY(C));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(C);
return 0;
}
|
GB_unop__identity_fp64_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_uint8
// op(A') function: GB_unop_tran__identity_fp64_uint8
// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8)
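// For example, if GraphBLAS is configured with GxB_NO_FP64 defined (one of
// the user-settable controls in GB_control.h), GB_DISABLE evaluates to true
// and the two functions below compile to stubs returning GrB_NO_VALUE, so
// callers fall back to the generic kernel instead.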
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_fp64_uint8
(
double *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_fp64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Simulation.c | #include "XSbench_header.h"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// each of which deploys a different combination of optimization strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype)
{
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_BEGIN;
//#endif
if( mype == 0)
printf("Beginning event based simulation...\n");
////////////////////////////////////////////////////////////////////////////////
// SUMMARY: Simulation Data Structure Manifest for "SD" Object
// Here we list all heap arrays (and lengths) in SD that would need to be
// offloaded manually if using an accelerator with a separate memory space
////////////////////////////////////////////////////////////////////////////////
// int * num_nucs; // Length = length_num_nucs;
// double * concs; // Length = length_concs
// int * mats; // Length = length_mats
// double * unionized_energy_array; // Length = length_unionized_energy_array
// int * index_grid; // Length = length_index_grid
// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
//
// Note: "unionized_energy_array" and "index_grid" can be of zero length
// depending on lookup method.
//
// Note: "Lengths" are given as the number of objects in the array, not the
// number of bytes.
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation Loop
////////////////////////////////////////////////////////////////////////////////
unsigned long long verification = 0;
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("event_simulation");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("event_simulation");
#endif
#pragma omp for schedule(dynamic,100) reduction(+:verification)
for( int i = 0; i < in.lookups; i++ )
{
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type index neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
SD.num_nucs, // 1-D array with number of nuclides per material
SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
SD.unionized_energy_array, // 1-D Unionized energy array
SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookup type)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required
// (e.g., atomics, reduction of thread-specific values in large
// array via CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
}
#ifdef USE_CALI_REG
CALI_MARK_END("event_simulation");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("event_simulation");
#endif
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_END;
//#endif
return verification;
}
unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype)
{
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_BEGIN;
//#endif
if( mype == 0)
printf("Beginning history based simulation...\n");
////////////////////////////////////////////////////////////////////////////////
// SUMMARY: Simulation Data Structure Manifest for "SD" Object
// Here we list all heap arrays (and lengths) in SD that would need to be
// offloaded manually if using an accelerator with a separate memory space
////////////////////////////////////////////////////////////////////////////////
// int * num_nucs; // Length = length_num_nucs;
// double * concs; // Length = length_concs
// int * mats; // Length = length_mats
// double * unionized_energy_array; // Length = length_unionized_energy_array
// int * index_grid; // Length = length_index_grid
// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
//
// Note: "unionized_energy_array" and "index_grid" can be of zero length
// depending on lookup method.
//
// Note: "Lengths" are given as the number of objects in the array, not the
// number of bytes.
////////////////////////////////////////////////////////////////////////////////
unsigned long long verification = 0;
// Begin outer lookup loop over particles. This loop is independent.
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("history_simulation");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("history_simulation");
#endif
#pragma omp for schedule(dynamic, 100) reduction(+:verification)
for( int p = 0; p < in.particles; p++ )
{
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup, and
// we may fast forward up to 5 times after each lookup)
seed = fast_forward_LCG(seed, p*in.lookups*2*5);
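// Worked example (illustrative numbers): with in.lookups = 34, particle
// p = 2 starts its stream 2 * 34 * 2 * 5 = 680 LCG draws in. Each lookup
// is budgeted 2 * 5 = 10 draws (the 2 samples plus at most 5 fast-forwards
// fit within that), so the streams of different particles never overlap.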
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
// Inner XS Lookup Loop
// This loop is dependent!
// i.e., the next iteration uses data computed in the previous iteration.
for( int i = 0; i < in.lookups; i++ )
{
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
SD.num_nucs, // 1-D array with number of nuclides per material
SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
SD.unionized_energy_array, // 1-D Unionized energy array
SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices for each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookups)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required (e.g.,
// atomics, reduction of thread-specific values in a large array via
// CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
// Randomly pick next energy and material for the particle
// Also incorporates results from macro_xs lookup to
// enforce loop dependency.
// In a real MC app, this dependency is expressed in terms
// of branching physics sampling, whereas here we are just
// artificially enforcing this dependence based on fast
// forwarding the LCG state
uint64_t n_forward = 0;
for( int j = 0; j < 5; j++ )
if( macro_xs_vector[j] > 1.0 )
n_forward++;
if( n_forward > 0 )
seed = fast_forward_LCG(seed, n_forward);
p_energy = LCG_random_double(&seed);
mat = pick_mat(&seed);
} //inner loop
} //outer loop
#ifdef USE_CALI_REG
CALI_MARK_END("history_simulation");
#endif
} // omp parallel
#ifdef USE_CALI_UNCORE
CALI_MARK_END("history_simulation");
#endif
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_END;
//#endif
return verification;
}
// Calculates the microscopic cross section for a given nuclide & energy
inline void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
long n_gridpoints,
double * restrict egrid, int * restrict index_data,
NuclideGridPoint * restrict nuclide_grids,
long idx, double * restrict xs_vector, int grid_type, int hash_bins ){
// Variables
double f;
NuclideGridPoint * low, * high;
// If using only the nuclide grid, we must perform a binary search
// to find the energy location in this particular nuclide's grid.
if( grid_type == NUCLIDE )
{
// Perform binary search on the Nuclide Grid to find the index
idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);
// pull ptr from nuclide grid and check to ensure that
// we're not reading off the end of the nuclide's grid
if( idx == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + idx];
}
else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
{
// pull ptr from energy grid and check to ensure that
// we're not reading off the end of the nuclide's grid
if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
}
else // Hash grid
{
// load lower bounding index
int u_low = index_data[idx * n_isotopes + nuc];
// Determine higher bounding index
int u_high;
if( idx == hash_bins - 1 )
u_high = n_gridpoints - 1;
else
u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;
// Check edge cases to make sure the energy is actually between these
// bounds. Then, if things look good, search for the gridpoint in the
// nuclide grid within the lower and upper limits we've calculated.
double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy;
double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
int lower;
if( p_energy <= e_low )
lower = 0;
else if( p_energy >= e_high )
lower = n_gridpoints - 1;
else
lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);
if( lower == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + lower];
}
high = low + 1;
// Calculate the reusable interpolation factor
f = (high->energy - p_energy) / (high->energy - low->energy);
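	// f is the normalized distance of p_energy from the high gridpoint:
	// f = 1 at low->energy and f = 0 at high->energy, so each
	// "xs = high_xs - f * (high_xs - low_xs)" below linearly interpolates
	// between the two bounding gridpoints.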
// Total XS
xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);
// Elastic XS
xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);
// Absorption XS (the struct field retains the original "absorbtion_xs" spelling)
xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);
// Fission XS
xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);
// Nu Fission XS
xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates macroscopic cross section based on a given material & energy
inline void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
long n_gridpoints, int * restrict num_nucs,
double * restrict concs,
double * restrict egrid, int * restrict index_data,
NuclideGridPoint * restrict nuclide_grids,
int * restrict mats,
double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_BEGIN;
//#endif
int p_nuc; // the nuclide we are looking up
long idx = -1;
double conc; // the concentration of the nuclide in the material
// cleans out macro_xs_vector
for( int k = 0; k < 5; k++ )
macro_xs_vector[k] = 0;
// If we are using the unionized energy grid (UEG), we only
// need to perform 1 binary search per macroscopic lookup.
// If we are using the nuclide grid search, it will have to be
// done inside of the "calculate_micro_xs" function for each different
// nuclide in the material.
if( grid_type == UNIONIZED )
{
idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
}
else if( grid_type == HASH )
{
double du = 1.0 / hash_bins;
idx = p_energy / du;
}
// Once we have located the index on the UEG, we can pull the data
// from the respective nuclide grids, as well as the nuclide
// concentration data for the material.
// Each nuclide from the material needs to have its micro-XS array
// looked up & interpolated (via calculate_micro_xs). Then, the
// micro XS is multiplied by the concentration of that nuclide
// in the material, and added to the total macro XS array.
// (Independent -- though if parallelizing, must use atomic operations
// or otherwise control access to the xs_vector and macro_xs_vector to
// avoid simultaneous writes to the same data structure)
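	// Mathematically, for each reaction channel k this loop computes the
	// macroscopic cross section
	//     Sigma_k(E) = sum_j N_j * sigma_{j,k}(E),
	// where N_j is the concentration of nuclide j in the material and
	// sigma_{j,k}(E) is its microscopic cross section at energy E.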
for( int j = 0; j < num_nucs[mat]; j++ )
{
double xs_vector[5];
p_nuc = mats[mat*max_num_nucs + j];
conc = concs[mat*max_num_nucs + j];
calculate_micro_xs( p_energy, p_nuc, n_isotopes,
n_gridpoints, egrid, index_data,
nuclide_grids, idx, xs_vector, grid_type, hash_bins );
for( int k = 0; k < 5; k++ )
macro_xs_vector[k] += xs_vector[k] * conc;
}
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_END;
//#endif
}
// binary search for energy on unionized energy grid
// returns lower index
long grid_search( long n, double quarry, double * restrict A)
{
long lowerLimit = 0;
long upperLimit = n-1;
long examinationPoint;
long length = upperLimit - lowerLimit;
while( length > 1 )
{
examinationPoint = lowerLimit + ( length / 2 );
if( A[examinationPoint] > quarry )
upperLimit = examinationPoint;
else
lowerLimit = examinationPoint;
length = upperLimit - lowerLimit;
}
return lowerLimit;
}
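// Note on grid_search: A is assumed sorted ascending. On exit, lowerLimit is
// in [0, n-2] and, for in-range queries, satisfies
//     A[lowerLimit] <= quarry < A[lowerLimit + 1],
// so callers can safely interpolate between the two bounding gridpoints.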
// binary search for energy on nuclide energy grid
long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
long lowerLimit = low;
long upperLimit = high;
long examinationPoint;
long length = upperLimit - lowerLimit;
while( length > 1 )
{
examinationPoint = lowerLimit + ( length / 2 );
if( A[examinationPoint].energy > quarry )
upperLimit = examinationPoint;
else
lowerLimit = examinationPoint;
length = upperLimit - lowerLimit;
}
return lowerLimit;
}
// picks a material based on a probabilistic distribution
int pick_mat( uint64_t * seed )
{
// I have a nice spreadsheet supporting these numbers. They are
// the fractions (by volume) of material in the core. Not a
// *perfect* approximation of where XS lookups are going to occur,
// but this will do a good job of biasing the system nonetheless.
double dist[12];
dist[0] = 0.140; // fuel
dist[1] = 0.052; // cladding
dist[2] = 0.275; // cold, borated water
dist[3] = 0.134; // hot, borated water
dist[4] = 0.154; // RPV
dist[5] = 0.064; // Lower, radial reflector
dist[6] = 0.066; // Upper reflector / top plate
dist[7] = 0.055; // bottom plate
dist[8] = 0.008; // bottom nozzle
dist[9] = 0.015; // top nozzle
dist[10] = 0.025; // top of fuel assemblies
dist[11] = 0.013; // bottom of fuel assemblies
double roll = LCG_random_double(seed);
// makes a pick based on the distro
for( int i = 0; i < 12; i++ )
{
double running = 0;
for( int j = i; j > 0; j-- )
running += dist[j];
if( roll < running )
return i;
}
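	// Fallback: the inner loop accumulates dist[1..i] (dist[0] is never
	// summed), so the residual probability mass -- roughly dist[0], the
	// fuel fraction -- falls through to material 0 here.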
return 0;
}
double LCG_random_double(uint64_t * seed)
{
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
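// Note on the modulus above: m = 2^63 is a power of two and the arithmetic
// is unsigned 64-bit (which wraps mod 2^64), so "% m" simply clears the top
// bit; compilers lower it to an AND with (m - 1) rather than a division.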
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while(n > 0)
{
if(n & 1)
{
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
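// The loop above is an O(log n) LCG skip-ahead: after n steps the state is
//     seed_n = a^n * seed + c * (a^(n-1) + ... + a + 1)  (mod m),
// and (a_new, c_new) is built by binary exponentiation using the affine
// composition rule: (a1, c1) followed by (a2, c2) gives (a1*a2, a2*c1 + c2).
// A minimal sanity-check sketch (hypothetical helper, not part of XSBench)
// that steps the LCG sequentially and compares against the fast-forward:
static int lcg_fast_forward_matches(uint64_t seed, uint64_t n)
{
	const uint64_t m = 9223372036854775808ULL; // 2^63
	const uint64_t a = 2806196910506780709ULL;
	const uint64_t c = 1ULL;
	uint64_t s = seed;
	for( uint64_t i = 0; i < n; i++ )
		s = (a * s + c) % m; // one sequential LCG step
	return s == fast_forward_LCG(seed, n);
}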
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, each of which deploys a different combination of optimization strategies.
// By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//
// As fast parallel sorting will be required for these optimizations, we will
// first define a set of key-value parallel quicksort routines.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Parallel Quicksort Key-Value Sorting Algorithms
////////////////////////////////////////////////////////////////////////////////////
//
// These algorithms are based on the parallel quicksort implementation by
// Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel
//
// Eduard's original version was for an integer type quicksort, but I have modified
// it to form two different versions that can sort key-value pairs together without
// having to bundle them into a separate object. Additionally, I have modified the
// optimal chunk sizes and restricted the number of threads for the array sizing
// that XSBench will be using by default.
//
// Eduard's original implementation carries the following license, which applies to
// the following functions only:
//
// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
// void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads)
// void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
// void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads)
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Eduard López
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////////
void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
{
int i = left, j = right;
int tmp;
int pivot = key[(left + right) / 2];
{
while (i <= j) {
while (key[i] < pivot)
i++;
while (key[j] > pivot)
j--;
if (i <= j) {
tmp = key[i];
key[i] = key[j];
key[j] = tmp;
double tmp_v = value[i];
value[i] = value[j];
value[j] = tmp_v;
i++;
j--;
}
}
}
if ( ((right-left)<cutoff) ){
if (left < j){ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
if (i < right){ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
}else{
#pragma omp task
{ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
#pragma omp task
{ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
}
}
void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){
// Set the minimum problem size for which new tasks are still spawned
int cutoff = 10000;
// For this problem size, more than 16 threads on CPU is not helpful
if( numThreads > 16 )
numThreads = 16;
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single nowait
{
quickSort_parallel_internal_i_d(key,value, 0, lenArray-1, cutoff);
}
}
}
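// Note: no explicit taskwait is needed above -- all tasks spawned inside a
// parallel region (including nested child tasks) are guaranteed complete at
// the region's implicit barrier, so the sort is finished before the
// function returns.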
void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
{
int i = left, j = right;
double tmp;
double pivot = key[(left + right) / 2];
{
while (i <= j) {
while (key[i] < pivot)
i++;
while (key[j] > pivot)
j--;
if (i <= j) {
tmp = key[i];
key[i] = key[j];
key[j] = tmp;
int tmp_v = value[i];
value[i] = value[j];
value[j] = tmp_v;
i++;
j--;
}
}
}
if ( ((right-left)<cutoff) ){
if (left < j){ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
if (i < right){ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); }
}else{
#pragma omp task
{ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
#pragma omp task
{ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); }
}
}
void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){
// Set the minimum problem size for which new tasks are still spawned
int cutoff = 10000;
// For this problem size, more than 16 threads on CPU is not helpful
if( numThreads > 16 )
numThreads = 16;
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single nowait
{
quickSort_parallel_internal_d_i(key,value, 0, lenArray-1, cutoff);
}
}
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting
// lookups by material and energy
////////////////////////////////////////////////////////////////////////////////////
// This kernel separates out the sampling and lookup regions of the event-based
// model, and then sorts the lookups by material type and energy. The goal of this
// optimization is to greatly improve cache locality, so that XS indices
// loaded from memory may be re-used for multiple lookups.
//
// As efficient sorting is key for performance, we must also implement an
// efficient key-value parallel sorting algorithm. We experimented with using
// the C++ version of thrust for this purpose, but found that our own
// implementation was slightly faster than the thrust library version, so for
// speed and simplicity we do not add the thrust dependency.
////////////////////////////////////////////////////////////////////////////////////
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype)
{
#ifdef USE_CALI_REG
CALI_MARK_FUNCTION_BEGIN;
#endif
char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort";
if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
size_t sz;
size_t total_sz = 0;
double start, stop;
sz = in.lookups * sizeof(double);
SD.p_energy_samples = (double *) malloc(sz);
total_sz += sz;
SD.length_p_energy_samples = in.lookups;
sz = in.lookups * sizeof(int);
SD.mat_samples = (int *) malloc(sz);
total_sz += sz;
SD.length_mat_samples = in.lookups;
if( mype == 0) printf("Allocated an additional %.0lf MB of data.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Sample Materials and Energies
////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel for schedule(dynamic, 100)
for( int i = 0; i < in.lookups; i++ )
{
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
SD.p_energy_samples[i] = p_energy;
SD.mat_samples[i] = mat;
}
if(mype == 0) printf("finished sampling...\n");
////////////////////////////////////////////////////////////////////////////////
// Sort by Material
////////////////////////////////////////////////////////////////////////////////
start = get_time();
quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads);
stop = get_time();
if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start);
////////////////////////////////////////////////////////////////////////////////
// Sort by Energy
////////////////////////////////////////////////////////////////////////////////
start = get_time();
// Count up number of each type of sample.
int num_samples_per_mat[12] = {0};
for( int l = 0; l < in.lookups; l++ )
num_samples_per_mat[ SD.mat_samples[l] ]++;
// Determine offsets
int offsets[12] = {0};
for( int m = 1; m < 12; m++ )
offsets[m] = offsets[m-1] + num_samples_per_mat[m-1];
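	// Example with hypothetical counts: num_samples_per_mat = {4, 2, 3, ...}
	// yields offsets = {0, 4, 6, 9, ...} (an exclusive prefix sum), so
	// material m's samples occupy indices
	// [offsets[m], offsets[m] + num_samples_per_mat[m]).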
stop = get_time();
if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start);
start = stop;
// Sort each material type by energy level
int offset = 0;
for( int m = 0; m < 12; m++ )
quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads);
stop = get_time();
if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start);
////////////////////////////////////////////////////////////////////////////////
// Perform lookups for each material separately
////////////////////////////////////////////////////////////////////////////////
start = get_time();
unsigned long long verification = 0;
// Individual Materials
offset = 0;
for( int m = 0; m < 12; m++ )
{
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("event_simulation_1");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("event_simulation_1");
#endif
#pragma omp for schedule(dynamic,100) reduction(+:verification)
for( int i = offset; i < offset + num_samples_per_mat[m]; i++)
{
// load pre-sampled energy and material for the particle
double p_energy = SD.p_energy_samples[i];
int mat = SD.mat_samples[i];
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type index neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
SD.num_nucs, // 1-D array with number of nuclides per material
SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
SD.unionized_energy_array, // 1-D Unionized energy array
SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookup type)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required
// (e.g., atomics, reduction of thread-specific values in large
// array via CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
}
#ifdef USE_CALI_REG
CALI_MARK_END("event_simulation_1");
#endif
} // OMP parallel
#ifdef USE_CALI_UNCORE
CALI_MARK_END("event_simulation_1");
#endif
offset += num_samples_per_mat[m];
}
stop = get_time();
if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start);
#ifdef USE_CALI_REG
CALI_MARK_FUNCTION_END;
#endif
return verification;
}
unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData SD, int mype)
{
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_BEGIN;
//#endif
if( mype == 0)
printf("Beginning event based simulation...\n");
////////////////////////////////////////////////////////////////////////////////
// SUMMARY: Simulation Data Structure Manifest for "SD" Object
// Here we list all heap arrays (and lengths) in SD that would need to be
// offloaded manually if using an accelerator with a separate memory space
////////////////////////////////////////////////////////////////////////////////
// int * num_nucs; // Length = length_num_nucs;
// double * concs; // Length = length_concs
// int * mats; // Length = length_mats
// double * unionized_energy_array; // Length = length_unionized_energy_array
// int * index_grid; // Length = length_index_grid
// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
//
// Note: "unionized_energy_array" and "index_grid" can be of zero length
// depending on lookup method.
//
// Note: "Lengths" are given as the number of objects in the array, not the
// number of bytes.
////////////////////////////////////////////////////////////////////////////////
char * optimization_name = "Optimization 2 - Just energy sort";
if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
size_t sz;
size_t total_sz = 0;
double start, stop;
sz = in.lookups * sizeof(double);
SD.p_energy_samples = (double *) malloc(sz);
total_sz += sz;
SD.length_p_energy_samples = in.lookups;
sz = in.lookups * sizeof(int);
SD.mat_samples = (int *) malloc(sz);
total_sz += sz;
SD.length_mat_samples = in.lookups;
if( mype == 0) printf("Allocated an additional %.0lf MB of data.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Sample Materials and Energies
////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel for schedule(dynamic, 100)
for( int i = 0; i < in.lookups; i++ )
{
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
SD.p_energy_samples[i] = p_energy;
SD.mat_samples[i] = mat;
}
if(mype == 0) printf("finished sampling...\n");
////////////////////////////////////////////////////////////////////////////////
// Sort by Energy
////////////////////////////////////////////////////////////////////////////////
start = omp_get_wtime();
quickSort_parallel_d_i(SD.p_energy_samples, SD.mat_samples, in.lookups, in.nthreads);
stop = omp_get_wtime();
if(mype == 0) printf("Energy sort took %.3lf seconds\n", stop-start);
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation Loop
////////////////////////////////////////////////////////////////////////////////
unsigned long long verification = 0;
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("event_simulation");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("event_simulation");
#endif
#pragma omp for schedule(dynamic,100) reduction(+:verification)
for( int i = 0; i < in.lookups; i++ )
{
// Randomly pick an energy and material for the particle
double p_energy = SD.p_energy_samples[i];
int mat = SD.mat_samples[i] ;
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type index neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
SD.num_nucs, // 1-D array with number of nuclides per material
SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
SD.unionized_energy_array, // 1-D Unionized energy array
SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookup type)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required
// (e.g., atomics, reduction of thread-specific values in large
// array via CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
}
#ifdef USE_CALI_REG
CALI_MARK_END("event_simulation");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("event_simulation");
#endif
//#ifdef USE_CALI_REG
//CALI_MARK_FUNCTION_END;
//#endif
return verification;
}
|
oskar_cross_correlate_point_omp.c | /*
* Copyright (c) 2013-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <math.h>
#include "correlate/private_correlate_functions_inline.h"
#include "correlate/oskar_cross_correlate_point_omp.h"
#include "math/oskar_add_inline.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
void oskar_cross_correlate_point_omp_f(int num_sources, int num_stations,
const float4c* jones, const float* source_I, const float* source_Q,
const float* source_U, const float* source_V, const float* source_l,
const float* source_m, const float* source_n, const float* station_u,
const float* station_v, const float* station_w, float uv_min_lambda,
float uv_max_lambda, float inv_wavelength, float frac_bandwidth,
float4c* vis)
{
int SQ;
/* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
for (SQ = 0; SQ < num_stations; ++SQ)
{
int SP, i;
const float4c *station_p, *station_q;
/* Pointer to source vector for station q. */
station_q = &jones[SQ * num_sources];
/* Loop over baselines for this station. */
for (SP = SQ + 1; SP < num_stations; ++SP)
{
float uv_len, uu, vv, ww, uu2, vv2, uuvv;
float4c sum, guard;
oskar_clear_complex_matrix_f(&sum);
oskar_clear_complex_matrix_f(&guard);
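            /* The extra "guard" matrix appears to carry a rounding-error
             * compensation term (Kahan-style summation) for the
             * single-precision accumulation over many sources; the
             * double-precision variant below accumulates directly. */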
/* Pointer to source vector for station p. */
station_p = &jones[SP * num_sources];
/* Get common baseline values. */
oskar_evaluate_baseline_terms_inline_f(station_u[SP],
station_u[SQ], station_v[SP], station_v[SQ],
station_w[SP], station_w[SQ], inv_wavelength,
frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
/* Apply the baseline length filter. */
if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
continue;
/* Loop over sources. */
for (i = 0; i < num_sources; ++i)
{
float l, m, n, rb;
/* Get source direction cosines. */
l = source_l[i];
m = source_m[i];
n = source_n[i];
/* Compute bandwidth-smearing term. */
rb = oskar_sinc_f(uu * l + vv * m + ww * (n - 1.0f));
/* Accumulate baseline visibility response for source. */
oskar_accumulate_baseline_visibility_for_source_inline_f(&sum,
i, source_I, source_Q, source_U, source_V,
station_p, station_q, rb, &guard);
}
/* Add result to the baseline visibility. */
i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
oskar_add_complex_matrix_in_place_f(&vis[i], &sum);
}
}
}
/* Double precision. */
void oskar_cross_correlate_point_omp_d(int num_sources, int num_stations,
const double4c* jones, const double* source_I, const double* source_Q,
const double* source_U, const double* source_V, const double* source_l,
const double* source_m, const double* source_n, const double* station_u,
const double* station_v, const double* station_w, double uv_min_lambda,
double uv_max_lambda, double inv_wavelength, double frac_bandwidth,
double4c* vis)
{
int SQ;
/* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
for (SQ = 0; SQ < num_stations; ++SQ)
{
int SP, i;
const double4c *station_p, *station_q;
/* Pointer to source vector for station q. */
station_q = &jones[SQ * num_sources];
/* Loop over baselines for this station. */
for (SP = SQ + 1; SP < num_stations; ++SP)
{
double uv_len, uu, vv, ww, uu2, vv2, uuvv;
double4c sum;
oskar_clear_complex_matrix_d(&sum);
/* Pointer to source vector for station p. */
station_p = &jones[SP * num_sources];
/* Get common baseline values. */
oskar_evaluate_baseline_terms_inline_d(station_u[SP],
station_u[SQ], station_v[SP], station_v[SQ],
station_w[SP], station_w[SQ], inv_wavelength,
frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
/* Apply the baseline length filter. */
if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
continue;
/* Loop over sources. */
for (i = 0; i < num_sources; ++i)
{
double l, m, n, rb;
/* Get source direction cosines. */
l = source_l[i];
m = source_m[i];
n = source_n[i];
/* Compute bandwidth-smearing term. */
rb = oskar_sinc_d(uu * l + vv * m + ww * (n - 1.0));
/* Accumulate baseline visibility response for source. */
oskar_accumulate_baseline_visibility_for_source_inline_d(&sum,
i, source_I, source_Q, source_U, source_V,
station_p, station_q, rb);
}
/* Add result to the baseline visibility. */
i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
oskar_add_complex_matrix_in_place_d(&vis[i], &sum);
}
}
}
#ifdef __cplusplus
}
#endif
|
Compute.h | #ifndef COMPUTE_H_INCLUDED
#define COMPUTE_H_INCLUDED
#include <stdio.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <immintrin.h>
#include "Grad.h"
__float128 dx[10000],dy[10000],x0,y01;
float Ax,Ay,Bx,By,Cx,Cy;
float A,B,C,Ai,Bi,Ci;
float dxl[10000],dyl[10000];
int apnum;
char setpoint=1;
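// Overview: this header renders the Mandelbrot set using perturbation
// theory. dx/dy hold a high-precision (__float128) reference orbit
// z_{k+1} = z_k^2 + c computed once per view (dxl/dyl are float copies for
// the AVX loops), and each pixel iterates only its small offset via
//     delta_{n+1} = 2*z_n*delta_n + delta_n^2 + delta_0.
// Ax..Cy are series-approximation coefficients: at deep zoom (m > 1e15) the
// first apnum iterations are skipped by evaluating the cubic
//     delta_n ~= A*d + B*d^2 + C*d^3
// directly from each pixel's initial offset d.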
inline static void Screenshot(__float128 m,__float128 ph,__float128 pv,int iter, int res,__float128 mcx,__float128 mcy)
{
char file[30];
int height=HEIGHT*res, width=WIDTH*res;
unsigned char *pixels = malloc(height*4*width),tcb,tcg,tcr;
__float128 prex=(width*(-0.5)+1.0*m*ph*res),prey=(height*(-0.5)-1.0*m*pv*res);
__m256 zx,zy,cx,cy,x,y,four,mask,sum;
__m256 xy,tx,tx1;
__m256 k,iterace,one;
int off,i,j,off1,l,off2,off3;
iterace=_mm256_set1_ps(iter);
one=_mm256_set1_ps(1.0);
four= _mm256_set1_ps(4.0);
__float128 invert=1.0/(360*m*res);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
x=cx=_mm256_setr_ps(((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0),((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0));
y=cy=_mm256_setr_ps(((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k[4]);
pixels[off2+1] = colg(k[4]);
pixels[off2+2] = colr(k[4]);
pixels[off3+4] = colb(k[5]);
pixels[off3+5] = colg(k[5]);
pixels[off3+6] = colr(k[5]);
pixels[off2+8] = colb(k[6]);
pixels[off2+9] = colg(k[6]);
pixels[off2+10] = colr(k[6]);
pixels[off3+12] = colb(k[7]);
pixels[off3+13] = colg(k[7]);
pixels[off3+14] = colr(k[7]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14]){
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else{
x=cx;
y=cy=_mm256_setr_ps(((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k[4]);
pixels[off3+1] = colg(k[4]);
pixels[off3+2] = colr(k[4]);
pixels[off2+4] = colb(k[5]);
pixels[off2+5] = colg(k[5]);
pixels[off2+6] = colr(k[5]);
pixels[off3+8] = colb(k[6]);
pixels[off3+9] = colg(k[6]);
pixels[off3+10] = colr(k[6]);
pixels[off2+12] = colb(k[7]);
pixels[off2+13] = colg(k[7]);
pixels[off2+14] = colr(k[7]);
}
}
}
SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(pixels, width, height, 8*4, width*4, 0, 0, 0, 0);
sprintf_s(file,30,"images/%ld.bmp",(long)time(NULL));
SDL_SaveBMP(surf,file);
SDL_FreeSurface(surf);
free(pixels);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline static void Render(unsigned char *pixels,__float128 m,__float128 ph,__float128 pv,int iter,__float128 mcx,
__float128 mcy,char index,char index2,char index3)
{
__float128 prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__float128 px,py;
__m256 zx,zy,cx,cy,x,y,four,mask,sum;
__m256 xy,tx,tx1;
__m256 k,iterace,one;
iterace=_mm256_set1_ps(iter);
one=_mm256_set1_ps(1.0f);
four= _mm256_set1_ps(4.0f);
__float128 invert=(1.0/(360.0*m));
int off,i,j,off1,l,f,off2,off3;
unsigned char tcb,tcr,tcg;
if(setpoint)
{
dx[0]=x0=mcx;
dy[0]=y01=mcy;
for(f=0; f<9999; f++)
{
px=dx[f]*dx[f];
py=dy[f]*dy[f];
dy[f+1]=2.0*dx[f]*dy[f]+y01;
dx[f+1]=px-py+x0;
}
#pragma omp parallel for simd
for(i=0; i<9999; i++)
{
dxl[i]=dx[i];
dyl[i]=dy[i];
}
Ax=1.0f;
Ay=Bx=By=Cx=Cy=0.0f;
apnum=0;
for(f=0; f<iter; f++)
{
C=2.0f*(dxl[f]*Cx-dyl[f]*Cy+Ax*Bx-Ay*By);
Ci=2.0f*(dyl[f]*Cx+dxl[f]*Cy+Ay*Bx+Ax*By);
B=2.0f*(dxl[f]*Bx-dyl[f]*By)+Ax*Ax-Ay*Ay;
Bi=2.0f*(dyl[f]*Bx+dxl[f]*By+Ax*Ay);
A=2.0f*(dxl[f]*Ax-dyl[f]*Ay)+1.0f;
Ai=2.0f*(dyl[f]*Ax+dxl[f]*Ay);
if(A>2e200||Ai>2e200||B>2e200||Bi>2e200||C>2e200||Ci>2e200)break;
if(A<-2e200||Ai<-2e200||B<-2e200||Bi<-2e200||C<-2e200||Ci<-2e200)break;
Cx=C;
Cy=Ci;
Bx=B;
By=Bi;
Ax=A;
Ay=Ai;
}
apnum=f;
printf("A %e %e B %e %e C %e %e Skipped: %d/%d\n",Ax,Ay,Bx,By,Cx,Cy,apnum,iter);
setpoint=0;
}
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=cx=_mm256_setr_ps(((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0),((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0));
y=cy=_mm256_setr_ps(((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k[4]);
pixels[off2+1] = colg(k[4]);
pixels[off2+2] = colr(k[4]);
pixels[off3+4] = colb(k[5]);
pixels[off3+5] = colg(k[5]);
pixels[off3+6] = colr(k[5]);
pixels[off2+8] = colb(k[6]);
pixels[off2+9] = colg(k[6]);
pixels[off2+10] = colr(k[6]);
pixels[off3+12] = colb(k[7]);
pixels[off3+13] = colg(k[7]);
pixels[off3+14] = colr(k[7]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14]){
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else{
x=cx;
y=cy=_mm256_setr_ps(((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k[4]);
pixels[off3+1] = colg(k[4]);
pixels[off3+2] = colr(k[4]);
pixels[off2+4] = colb(k[5]);
pixels[off2+5] = colg(k[5]);
pixels[off2+6] = colr(k[5]);
pixels[off3+8] = colb(k[6]);
pixels[off3+9] = colg(k[6]);
pixels[off3+10] = colr(k[6]);
pixels[off2+12] = colb(k[7]);
pixels[off2+13] = colg(k[7]);
pixels[off2+14] = colr(k[7]);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
}
inline static void RenderCol(unsigned char *pixels,__float128 m,__float128 ph,__float128 pv,int iter,__float128 mcx,
__float128 mcy,char index,char index2,char index3)
{
__float128 prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__float128 px,py;
__m256 zx,zy,cx,cy,x,y,four,mask,sum;
__m256 xy,tx,tx1;
__m256 k,iterace,one;
iterace=_mm256_set1_ps(iter);
one=_mm256_set1_ps(1.0f);
four= _mm256_set1_ps(4.0f);
__float128 invert=(1.0/(360.0*m));
int off,i,j,off1,l,f,off2,off3;
unsigned char tcb,tcr,tcg;
if(setpoint)
{
dx[0]=x0=mcx;
dy[0]=y01=mcy;
for(f=0; f<9999; f++)
{
px=dx[f]*dx[f];
py=dy[f]*dy[f];
dy[f+1]=2.0*dx[f]*dy[f]+y01;
dx[f+1]=px-py+x0;
}
#pragma omp parallel for simd
for(i=0; i<9999; i++)
{
dxl[i]=dx[i];
dyl[i]=dy[i];
}
Ax=1.0f;
Ay=Bx=By=Cx=Cy=0.0f;
apnum=0;
for(f=0; f<iter; f++)
{
C=2.0f*(dxl[f]*Cx-dyl[f]*Cy+Ax*Bx-Ay*By);
Ci=2.0f*(dyl[f]*Cx+dxl[f]*Cy+Ay*Bx+Ax*By);
B=2.0f*(dxl[f]*Bx-dyl[f]*By)+Ax*Ax-Ay*Ay;
Bi=2.0f*(dyl[f]*Bx+dxl[f]*By+Ax*Ay);
A=2.0f*(dxl[f]*Ax-dyl[f]*Ay)+1.0f;
Ai=2.0f*(dyl[f]*Ax+dxl[f]*Ay);
if(A>2e200||Ai>2e200||B>2e200||Bi>2e200||C>2e200||Ci>2e200)break;
if(A<-2e200||Ai<-2e200||B<-2e200||Bi<-2e200||C<-2e200||Ci<-2e200)break;
Cx=C;
Cy=Ci;
Bx=B;
By=Bi;
Ax=A;
Ay=Ai;
}
apnum=f;
printf("A %e %e B %e %e C %e %e Skipped: %d/%d\n",Ax,Ay,Bx,By,Cx,Cy,apnum,iter);
setpoint=0;
}
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
{
if(j<2*WIDTH/5)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=cx=_mm256_setr_ps(((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0),((j+prex)*invert-x0),((j+prex+1)*invert-x0),((j+prex+2)*invert-x0),((j+prex+3)*invert-x0));
y=cy=_mm256_setr_ps(((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k[4]);
pixels[off2+1] = colg(k[4]);
pixels[off2+2] = colr(k[4]);
pixels[off3+4] = colb(k[5]);
pixels[off3+5] = colg(k[5]);
pixels[off3+6] = colr(k[5]);
pixels[off2+8] = colb(k[6]);
pixels[off2+9] = colg(k[6]);
pixels[off2+10] = colr(k[6]);
pixels[off3+12] = colb(k[7]);
pixels[off3+13] = colg(k[7]);
pixels[off3+14] = colr(k[7]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14]){
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else{
x=cx;
y=cy=_mm256_setr_ps(((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+1)*invert-y01),((i+prey)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01),((i+prey+3)*invert-y01),((i+prey+2)*invert-y01));
k=_mm256_setzero_ps();
l=0;
if(m>1e15&&apnum)
{
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_sub_ps(zx,zy);
xy=x*y;
tx=Ax*x-Ay*y+Bx*(sum)-2.0f*By*xy+Cx*x*(zx-3.0f*zy)+Cy*y*(zy-3.0f*zx);
y=Ax*y+Ay*x+2.0f*Bx*xy+By*(sum)+Cx*y*(3.0f*zx-zy)+Cy*x*(zx-3.0f*zy);
x=tx;
l=apnum;
k+=(float)apnum;
}
do
{
tx=_mm256_set1_ps(dxl[l]);
tx1=_mm256_set1_ps(dyl[l]);
zx=_mm256_mul_ps(x,x);
zy=_mm256_mul_ps(y,y);
sum=_mm256_add_ps(zy,zx);
xy=2.0f*(y*(x+tx)+x*tx1);
x=2.0f*(tx*x-tx1*y)+zx-zy+cx;
y=_mm256_add_ps(xy,cy);
mask= _mm256_cmp_ps(sum,four,_CMP_LT_OQ);
k=_mm256_add_ps(k,_mm256_and_ps(one,mask));
}
while(++l<iter&&_mm256_movemask_ps(mask));
k=_mm256_div_ps(k,iterace);k*=8000.0f;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k[4]);
pixels[off3+1] = colg(k[4]);
pixels[off3+2] = colr(k[4]);
pixels[off2+4] = colb(k[5]);
pixels[off2+5] = colg(k[5]);
pixels[off2+6] = colr(k[5]);
pixels[off3+8] = colb(k[6]);
pixels[off3+9] = colg(k[6]);
pixels[off3+10] = colr(k[6]);
pixels[off2+12] = colb(k[7]);
pixels[off2+13] = colg(k[7]);
pixels[off2+14] = colr(k[7]);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
}
#endif // COMPUTE_H_INCLUDED
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#if defined(__AVX2__) && defined (__SSE2__)
#define USEAVX2
#endif
#if defined(__SSE2__)
#define USESSE2
#endif
// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#endif
// Macros.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
return 1;
#else
return 0;
#endif
}
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
return 1;
#else
return 0;
#endif
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
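/* Overview: bitshuffle treats an array of `size` elements of `elem_size`
 * bytes as a (8 * elem_size) x size bit matrix and transposes it, so that
 * bit k of every element lands in one contiguous run. Typed numeric data
 * often varies only in a few low-order bits, so grouping bits of equal
 * significance makes the stream much more compressible. The scalar path
 * stages the transpose as: bytes within elements, bits within bytes, then
 * bit-rows within groups of eight (see bshuf_trans_bit_elem_scal). */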
/* Transpose 8x8 bit array packed into a single quadword *x*.
* *t* is workspace. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
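/* Each step of TRANS_BIT_8X8 is a masked "delta swap"
 * (t = (x ^ (x >> s)) & mask; x ^= t ^ (t << s)) that exchanges bit blocks
 * mirrored across the matrix diagonal: 1x1 blocks within 2x2 tiles
 * (shift 7), 2x2 blocks within 4x4 tiles (shift 14), then the two
 * off-diagonal 4x4 blocks (shift 28) -- a divide-and-conquer transpose in
 * 18 bitwise operations (cf. Hacker's Delight). */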
/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
const size_t elem_size) {
const char* in_b = (const char*) in;
char* out_b = (char*) out;
memcpy(out_b, in_b, size * elem_size);
return size * elem_size;
}
/* Transpose bytes within elements, starting partway through input. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start) {
size_t ii, jj, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(start);
if (size > start) {
// ii loop separated into 2 loops so the compiler can unroll
// the inner one.
for (ii = start; ii + 7 < size; ii += 8) {
for (jj = 0; jj < elem_size; jj++) {
for (kk = 0; kk < 8; kk++) {
out_b[jj * size + ii + kk]
= in_b[ii * elem_size + kk * elem_size + jj];
}
}
}
for (ii = size - size % 8; ii < size; ii ++) {
for (jj = 0; jj < elem_size; jj++) {
out_b[jj * size + ii] = in_b[ii * elem_size + jj];
}
}
}
return size * elem_size;
}
/* Transpose bytes within elements. */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start_byte) {
int ii, kk;
const uint64_t* in_b = (const uint64_t*) in;
uint8_t* out_b = (uint8_t*) out;
uint64_t x, t;
size_t nbyte = elem_size * size;
size_t nbyte_bitrow = nbyte / 8;
CHECK_MULT_EIGHT(nbyte);
CHECK_MULT_EIGHT(start_byte);
for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
x = in_b[ii];
TRANS_BIT_8X8(x, t);
for (kk = 0; kk < 8; kk ++) {
out_b[kk * nbyte_bitrow + ii] = x;
x = x >> 8;
}
}
return size * elem_size;
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}
/* General transpose of an array, optimized for large element sizes. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
const size_t ldb, const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
for(ii = 0; ii < lda; ii++) {
for(jj = 0; jj < ldb; jj++) {
memcpy(&out_b[(jj*lda + ii) * elem_size],
&in_b[(ii*ldb + jj) * elem_size], elem_size);
}
}
return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t nbyte_bitrow = size / 8;
CHECK_MULT_EIGHT(size);
return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj, kk, nbyte_row;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
nbyte_row = size / 8;
CHECK_MULT_EIGHT(size);
for (jj = 0; jj < elem_size; jj++) {
for (ii = 0; ii < nbyte_row; ii++) {
for (kk = 0; kk < 8; kk++) {
out_b[ii * 8 * elem_size + jj * 8 + kk] = \
in_b[(jj * 8 + kk) * nbyte_row + ii];
}
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
const size_t size, const size_t elem_size) {
size_t ii, jj, kk;
const char *in_b;
char *out_b;
uint64_t x, t;
size_t nbyte;
CHECK_MULT_EIGHT(size);
in_b = (const char*) in;
out_b = (char*) out;
nbyte = elem_size * size;
for (jj = 0; jj < 8 * elem_size; jj += 8) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
x = *((uint64_t*) &in_b[ii + jj]);
TRANS_BIT_8X8(x, t);
for (kk = 0; kk < 8; kk++) {
*((uint8_t*) &out_b[ii + jj / 8 + kk * elem_size]) = x;
x = x >> 8;
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
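/* Roundtrip sketch (not part of the original source; the guard is an
 * assumption): the scalar bit transpose and untranspose are inverses when
 * the element count is a multiple of 8. */
#ifdef BSHUF_EXAMPLES
#include <assert.h>
#include <string.h>
static void example_scal_roundtrip(void) {
    uint8_t in[64], mid[64], out[64];
    size_t i;
    for (i = 0; i < 64; i++) in[i] = (uint8_t) (3 * i + 1);
    assert(bshuf_trans_bit_elem_scal(in, mid, 16, 4) == 64);   /* 16 elems x 4 B */
    assert(bshuf_untrans_bit_elem_scal(mid, out, 16, 4) == 64);
    assert(memcmp(in, out, 64) == 0);
}
#endif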
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best SSE algorithm available. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
            // SSE2 has no storeh instruction for integers, so reinterpret the
            // registers as floats; a storel does exist (_mm_storel_epi64).
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
    // With a bit of care, this could be written such that it is safe for
    // the input and output buffers to alias (in == out).
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m128i xmm;
int32_t bt;
if (elem_size % 2) {
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USESSE2
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif // #ifdef USESSE2
/* ---- Worker code that uses AVX2 ----
 *
 * The following code makes use of the AVX2 instruction set and specialized
 * 32 byte registers. The AVX2 instructions are present on newer x86
 * processors. The first Intel processor microarchitecture supporting AVX2 was
 * Haswell (2013).
 *
 */
#ifdef USEAVX2
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
    __m256i ymm_storage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
                    ymm_storage[kk][hh] = ymm_1[kk];
}
}
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
                    ymm_0[kk] = ymm_storage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
    // With a bit of care, this could be written such that it is safe for
    // the input and output buffers to alias (in == out).
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEAVX2
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif // #ifdef USEAVX2
/* ---- Drivers selecting best instruction set at compile time. ---- */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#else
count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#else
count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a function for processing a single block to process an entire buffer in
* parallel. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
size_t ii;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < size / block_size; ii ++) {
count = fun(&C, block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
/* Bitshuffle a single block. */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_trans_bit_elem(in, out, size, elem_size);
return count;
}
/* Bitunshuffle a single block. */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_untrans_bit_elem(in, out, size, elem_size);
return count;
}
/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t pow28 = 1 << 8;
for (ii = 7; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 64 bit unsigned integer from a buffer in big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 7; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint32_t pow28 = 1 << 8;
for (ii = 3; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 32 bit unsigned integer from a buffer in big endian order. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
int ii;
    const uint8_t* b = (const uint8_t*) buf;
uint32_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 3; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
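/* Roundtrip sketch (not part of the original source; the guard is an
 * assumption): the big-endian helpers store the most significant byte
 * first and read back exactly what was written. */
#ifdef BSHUF_EXAMPLES
#include <assert.h>
static void example_uint_be(void) {
    uint8_t buf[8];
    bshuf_write_uint64_BE(buf, 0x0102030405060708ULL);
    assert(buf[0] == 0x01 && buf[7] == 0x08);  /* MSB stored first */
    assert(bshuf_read_uint64_BE(buf) == 0x0102030405060708ULL);
    bshuf_write_uint32_BE(buf, 0xA1B2C3D4u);
    assert(bshuf_read_uint32_BE(buf) == 0xA1B2C3D4u);
}
#endif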
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
size_t bshuf_default_block_size(const size_t elem_size) {
// This function needs to be absolutely stable between versions.
// Otherwise encoded data will not be decodable.
size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
// Ensure it is a required multiple.
block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}
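/* Worked example (the concrete BSHUF_* values here are assumptions for
 * illustration): with a target of 8192 bytes and elem_size = 4,
 * block_size = 8192 / 4 = 2048 elements; 2048 is already a multiple of
 * BSHUF_BLOCKED_MULT = 8, so 2048 is returned provided it is at least
 * BSHUF_MIN_RECOMMEND_BLOCK. */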
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size,
elem_size, block_size);
}
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size,
elem_size, block_size);
}
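/* End-to-end usage sketch (not part of the original source; the guard is
 * an assumption): bitshuffle then bitunshuffle a buffer of floats and
 * verify the data survives the round trip. size counts elements, and
 * block_size = 0 selects the default block size. */
#ifdef BSHUF_EXAMPLES
#include <assert.h>
#include <string.h>
static void example_public_roundtrip(void) {
    static float data[2048], shuffled[2048], restored[2048];
    size_t i;
    int64_t c;
    for (i = 0; i < 2048; i++) data[i] = (float) i * 0.5f;
    c = bshuf_bitshuffle(data, shuffled, 2048, sizeof(float), 0);
    assert(c == (int64_t) (2048 * sizeof(float)));
    c = bshuf_bitunshuffle(shuffled, restored, 2048, sizeof(float), 0);
    assert(c == (int64_t) (2048 * sizeof(float)));
    assert(memcmp(data, restored, sizeof(data)) == 0);
}
#endif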
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
oskar_imager_rotate_coords.c | /*
* Copyright (c) 2016-2017, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "imager/private_imager.h"
#include "imager/oskar_imager.h"
#ifdef __cplusplus
extern "C" {
#endif
void oskar_imager_rotate_coords(const oskar_Imager* h, size_t num_coords,
const oskar_Mem* uu_in, const oskar_Mem* vv_in, const oskar_Mem* ww_in,
oskar_Mem* uu_out, oskar_Mem* vv_out, oskar_Mem* ww_out)
{
#ifdef OSKAR_OS_WIN
int i;
    const int num = (int) num_coords;
#else
size_t i;
const size_t num = num_coords;
#endif
const double *M = h->M;
if (oskar_mem_precision(uu_in) == OSKAR_SINGLE)
{
float *uu_o, *vv_o, *ww_o;
const float *uu_i, *vv_i, *ww_i;
uu_i = (const float*)oskar_mem_void_const(uu_in);
vv_i = (const float*)oskar_mem_void_const(vv_in);
ww_i = (const float*)oskar_mem_void_const(ww_in);
uu_o = (float*)oskar_mem_void(uu_out);
vv_o = (float*)oskar_mem_void(vv_out);
ww_o = (float*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
for (i = 0; i < num; ++i)
{
double s0, s1, s2, t0, t1, t2;
s0 = uu_i[i]; s1 = vv_i[i]; s2 = ww_i[i];
t0 = M[0] * s0 + M[1] * s1 + M[2] * s2;
t1 = M[3] * s0 + M[4] * s1 + M[5] * s2;
t2 = M[6] * s0 + M[7] * s1 + M[8] * s2;
uu_o[i] = t0; vv_o[i] = t1; ww_o[i] = t2;
}
}
else
{
double *uu_o, *vv_o, *ww_o;
const double *uu_i, *vv_i, *ww_i;
uu_i = (const double*)oskar_mem_void_const(uu_in);
vv_i = (const double*)oskar_mem_void_const(vv_in);
ww_i = (const double*)oskar_mem_void_const(ww_in);
uu_o = (double*)oskar_mem_void(uu_out);
vv_o = (double*)oskar_mem_void(vv_out);
ww_o = (double*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
for (i = 0; i < num; ++i)
{
double s0, s1, s2, t0, t1, t2;
s0 = uu_i[i]; s1 = vv_i[i]; s2 = ww_i[i];
t0 = M[0] * s0 + M[1] * s1 + M[2] * s2;
t1 = M[3] * s0 + M[4] * s1 + M[5] * s2;
t2 = M[6] * s0 + M[7] * s1 + M[8] * s2;
uu_o[i] = t0; vv_o[i] = t1; ww_o[i] = t2;
}
}
}
#ifdef __cplusplus
}
#endif
|
12.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
int main()
{
// omp_set_num_threads(4);
#pragma omp parallel
{
int tid = omp_get_thread_num();
printf("\n\t\t \"Hello World\" is being printed by the thread : %d\n", tid);
if (tid == 0)
{
int nthreads = omp_get_num_threads();
printf("\n\t\t Master thread printing total number of threads for this execution are : %d\n", nthreads);
}
}
return 0;
}
|
GB_unop__carg_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__carg_fp64_fc64
// op(A') function: GB_unop_tran__carg_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = carg (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = carg (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = carg (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CARG || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__carg_fp64_fc64
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = carg (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = carg (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__carg_fp64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simd_loop_safelen.c | /* Example of the safelen clause on the simd construct.
   The safelen(16) clause asserts that there is no loop-carried dependence
   between iterations fewer than 16 apart, so vector lengths of up to 16
   are correct.
*/
void simd_loop_safelen(double *a, double *b, double *c, int n,
int offset)
{
int i;
#pragma omp simd safelen(16)
for (i=offset; i<n; i++)
a[i] = b[i-offset] + c[i];
}
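/* Usage sketch (not part of the original source; the guard and main() are
 * assumptions for illustration): safelen(16) is a promise to the compiler,
 * so a call is only correct when any loop-carried dependence spans at least
 * 16 iterations -- here b aliases a with offset 16. */
#ifdef SAFELEN_EXAMPLE
#include <stdio.h>
int main(void)
{
    double a[64], c[64];
    int i;
    for (i = 0; i < 64; i++) { a[i] = 1.0; c[i] = 2.0; }
    /* b aliases a with a dependence distance of 16, matching safelen(16). */
    simd_loop_safelen(a, a, c, 64, 16);
    printf("a[63] = %g\n", a[63]);   /* 1 + 2 + 2 + 2 = 7 */
    return 0;
}
#endif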
|
lab2-stephen-stengel.h | #ifndef STEPHEN_STENGEL_LAB2lol
#define STEPHEN_STENGEL_LAB2lol
#include <stdio.h>
#include <limits.h>
#include <time.h>
#include <omp.h>
#include "mylab1functions.h"
#define MAX_FILENAME_LEN 500
#define MAX_STR_LEN 50
//function prototypes
void testStringPass(char * dataStr);
void writeDataToFile(char * filename, int m, int n, int t, double time, char * testType);
void doAlmostNothing();
int runFuncTest(double **myFunc(), char * dataFile, char * picsFile, int maxM, int N, int maxT, char * testType);
void timeThisWay(double **myFunc(), double **myArray, char *dataFile, char *picsFile, int m, int N, int tee, char *testType);
double powerArraysThisWay(double **myFunc(), double **myArray, int m, int N, int tee);
//functions
//Appends a (m n t time) datapoint to the end of the given filename.
void writeDataToFile(char * filename, int m, int n, int t, double time, char * testType)
{
FILE *myFile = fopen(filename, "a");
fprintf(myFile, "%d %d %d %f %s\n", m, n, t, time, testType);
fclose(myFile);
}
//This tests the time of the given function: col, row, or individual cell
//(maybe block if there is time). The given function handles its own printing;
//this function handles the parameters going to the test function.
int runFuncTest(double **myFunc(), char * dataFile, char * picsFile, int maxM, int N, int maxT, char * testType)
{
double start = omp_get_wtime();
int numThreadTests = 10;
int tIncrement = 1;
if ((maxT / numThreadTests) > 1)
{
tIncrement = (maxT / numThreadTests);
}
printf("tIncrement: %d\n", tIncrement);
int mStep = 1;
if ( maxM > 100 )
{
mStep = maxM / 20; //20 chosen because it is a good balance.
}
int tTop = maxT - (maxT - (tIncrement * (numThreadTests - 1))) + 1;
printf("tTop: %d\n", tTop);
for (int tee = tTop; tee > 0; tee -= tIncrement)//backwards keeps openMP from killing useful threads
{
printf("This T: %d\n", tee);
for (int m = 1; m < maxM + 1; m += mStep)
{
double **myArray = createSquareArray(m);
myArray = fillSquareArrayRandomDoubles(myArray, m);
//This function will find the min of three times for these inputs and write data to file.
timeThisWay(myFunc, myArray, dataFile, picsFile, m, N, tee, testType);
freeSquareDoubleArray(myArray, m);
}
}//end of loop that varies num threads
double elapsedTime = omp_get_wtime() - start;
printf("runfunctest: %s complete in %.2fs! now printing to file...\n", testType, elapsedTime);
//make graph
char printCommandCell[MAX_COMMAND_LEN];
sprintf(printCommandCell, "python3 cArrayGraph.py %s %s %s", dataFile, picsFile, testType);
system( printCommandCell );
printf("done!\n");
return 0;
}
//Gets the time of doing the multiplication this way.
void timeThisWay(double **myFunc(), double **myArray, char *dataFile, char *picsFile, int m, int N, int tee, char *testType)
{
double lowest = INT_MAX;
double elapsedTime = -1;
//do three tests timing each
for (int k = 0; k < 3; k++)
{
elapsedTime = powerArraysThisWay(myFunc, myArray, m, N, tee);
if (elapsedTime < lowest)
{
lowest = elapsedTime;
}
}
//append the (m, N, tee, time) datapoint to a file
writeDataToFile(dataFile, m, N, tee, lowest, testType);
}
//Calls the specific multiplication function needed and returns the runtime minus thread creation time (which should be near zero because OpenMP keeps its threads sleeping).
double powerArraysThisWay(double **myFunc(), double **myArray, int m, int N, int tee)
{
//~ double **originalCopy = copySquareDoubleArray(myArray, m);
double **originalCopy = myArray;
double **intermediate = copySquareDoubleArray(myArray, m);
//create timing vars here
struct timespec start, finish;
double elapsedTime = -1;
clock_gettime(CLOCK_MONOTONIC, &start);
//~ double ompTimeStart = omp_get_wtime();
for (int i = 1; i < N; i++)
{
    double **output = createSquareArray(m);
    output = myFunc(originalCopy, intermediate, output, m, tee);
    //copy output into intermediate; free the old copy first to avoid a leak
    freeSquareDoubleArray(intermediate, m);
    intermediate = copySquareDoubleArray(output, m);
    freeSquareDoubleArray(output, m);
}
clock_gettime(CLOCK_MONOTONIC, &finish); //stop the timer before computing the elapsed time
elapsedTime = getElapsedTime(start, finish);
//~ double ompTimeElapsed = omp_get_wtime() - ompTimeStart;
//~ if ( abs(ompTimeElapsed - elapsedTime) > 0 )
//~ {
//~ printf("time.h time: %f\tomp_get_wtime(): %f\n", elapsedTime, ompTimeElapsed);
//~ }
freeSquareDoubleArray(intermediate, m);
//~ freeSquareDoubleArray(originalCopy, m);
return elapsedTime;
}
double **multSquareArraysThreadCell(double **original, double **intermediate, double **output, int size, int tee)
{
#pragma omp parallel num_threads(tee) //only use this many threads out of the threadpool
{
register int i = 0;
register int j = 0;
register int k = 0;
//~ printf("current T: %d\tnumThreads: %d\n", tee, omp_get_num_threads());
#pragma omp for collapse(2) nowait //each thread loops through the k's for each cell
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
{
register double tmp = output[i][j];
for (k = 0; k < size; k++)
{
tmp += original[i][k] * intermediate[k][j];
}
output[i][j] = tmp;
}
}
}
return output;
}
double **multSquareArraysThreadRow(double **original, double **intermediate, double **output, int size, int tee)
{
#pragma omp parallel num_threads(tee) //only use this many threads out of the threadpool
{
register int i = 0;
register int j = 0;
register int k = 0;
#pragma omp for nowait //each thread has its own row i.
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
{
register double tmp = output[i][j];
for (k = 0; k < size; k++)
{
tmp += original[i][k] * intermediate[k][j];
}
output[i][j] = tmp;
}
}
}
return output;
}
//Same as row but with the i and j loops switched.
double **multSquareArraysThreadCol(double **original, double **intermediate, double **output, int size, int tee)
{
#pragma omp parallel num_threads(tee) //only use this many threads out of the threadpool
{
register int i = 0;
register int j = 0;
register int k = 0;
#pragma omp for nowait //each thread has its own col j.
for (j = 0; j < size; j++)
{
for (i = 0; i < size; i++)
{
register double tmp = output[i][j];
for (k = 0; k < size; k++)
{
tmp += original[i][k] * intermediate[k][j];
}
output[i][j] = tmp;
}
}
}
return output;
}
//Do almost nothing, used to make sure threads are created at start and not as needed.
void doAlmostNothing()
{
int a = 0;
int b = 0;
a += b;
}
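/* Usage sketch (an assumption, not part of the lab code; the guard, file
 * names, and parameter values are illustrative only): times the row-parallel
 * multiply for matrices up to 100x100 with up to 4 threads. Assumes
 * mylab1functions.h supplies createSquareArray and friends and that
 * cArrayGraph.py is present for the plotting step. */
#ifdef LAB2_EXAMPLE_MAIN
int main(void)
{
    doAlmostNothing(); //touch OpenMP early so the thread pool is warm
    return runFuncTest(multSquareArraysThreadRow, "row-data.txt", "row-pics",
                       100, 10, 4, "row");
}
#endif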
#endif
//Unused
/*
//this tests times for running each separate thread size
int runThreadTest(char * dataFile, char * picsFile, int maxM, int N, int maxT, char * testType)
{
//~ printf("enter any number to continue...\n");
//~ int a;
//~ scanf("%d", &a);
//~ printf("num threads in pool: %d\n", omp_get_num_threads());
//create timer variables.
struct timespec start, finish;
double lowest = INT_MAX;
double elapsedTime = -1;
int tIncrement = 1;
if ((maxT / 10) > 1)
{
tIncrement = maxT / 10;
}
printf("tIncrement: %d\n", tIncrement);
for (int tee = 1; tee <= maxT; tee += tIncrement)
{
//~ #pragma omp parallel num_threads(tee)
{
//~ #pragma omp single nowait
printf("num threads in use: %d\n", omp_get_num_threads());
//~ #pragma omp for private(start, finish, lowest, elapsedTime) nowait
for (int i = 1; i < maxM + 1; i++)
{
//create array
int sizeThisRound = i;
//~ int teeThisRound = j;
double **myArray = createSquareArray(sizeThisRound);
myArray = fillSquareArrayRandomDoubles(myArray, sizeThisRound);
//do three tests timing each
lowest = INT_MAX;
for (int k = 0; k < 3; k++)
{
clock_gettime(CLOCK_MONOTONIC, &start);
powerArrays(myArray, sizeThisRound, N);
elapsedTime = getElapsedTime(start, finish);
if (elapsedTime < lowest)
{
lowest = elapsedTime;
}
}
//~ //append the i, j, time to a file
//~ #pragma omp critical
{
//~ writeDataToFile(dataFile, sizeThisRound, N, omp_get_num_threads(), lowest, testType);
writeDataToFile(dataFile, sizeThisRound, N, tee, lowest, testType);
}
freeSquareDoubleArray(myArray, sizeThisRound);
}
}//end of parallel section.
}//end of loop that varies num threads
printf("runthreadtest complete!\n");
return 0;
}
*/
|
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017-2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "perftest.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <unistd.h>
#include <netdb.h>
#include <sys/poll.h>
test_type_t tests[] = {
{"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
"active message latency", "latency", 1},
{"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency", "latency", 1},
{"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
"atomic add latency", "latency", 1},
{"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate", "latency", 1},
{"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / rate", "latency", 1},
{"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / rate", "latency", 1},
{"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / rate", "latency", 1},
{"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"active message bandwidth / message rate", "overhead", 1},
{"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth / message rate", "overhead", 1},
{"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add message rate", "overhead", 1},
{"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
"tag match latency", "latency", 1},
{"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag match bandwidth", "overhead", 32},
{"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
"tag sync match latency", "latency", 1},
{"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag sync match bandwidth", "overhead", 32},
{"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency", "latency", 1},
{"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth", "overhead", 32},
{"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate", "latency", 1},
{"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add bandwidth / message rate", "overhead", 1},
{"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / bandwidth / rate", "latency", 1},
{"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / bandwidth / rate", "latency", 1},
{"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / bandwidth / rate", "latency", 1},
{"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"stream bandwidth", "overhead", 1},
{"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
"stream latency", "latency", 1},
{"ucp_am_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
"am latency", "latency", 1},
{"ucp_am_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"am bandwidth / message rate", "overhead", 32},
{NULL}
};
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
int poll_events, void *data, size_t size,
void (*progress)(void *arg), void *arg, const char *name)
{
size_t total = 0;
struct pollfd pfd;
int ret;
while (total < size) {
pfd.fd = sock;
pfd.events = poll_events;
pfd.revents = 0;
ret = poll(&pfd, 1, 1); /* poll for 1ms */
if (ret > 0) {
ucs_assert(ret == 1);
ucs_assert(pfd.revents & poll_events);
ret = sock_call(sock, (char*)data + total, size - total, 0);
if (ret < 0) {
ucs_error("%s() failed: %m", name);
return -1;
}
total += ret;
} else if ((ret < 0) && (errno != EINTR)) {
ucs_error("poll(fd=%d) failed: %m", sock);
return -1;
}
/* progress user context */
if (progress != NULL) {
progress(arg);
}
}
return 0;
}
static int safe_send(int sock, void *data, size_t size,
void (*progress)(void *arg), void *arg)
{
typedef ssize_t (*sock_call)(int, void *, size_t, int);
ucs_assert(sock >= 0);
return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send");
}
static int safe_recv(int sock, void *data, size_t size,
void (*progress)(void *arg), void *arg)
{
ucs_assert(sock >= 0);
return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}
ucs_status_t init_test_params(perftest_params_t *params)
{
memset(params, 0, sizeof(*params));
params->super.api = UCX_PERF_API_LAST;
params->super.command = UCX_PERF_CMD_LAST;
params->super.test_type = UCX_PERF_TEST_TYPE_LAST;
params->super.thread_mode = UCS_THREAD_MODE_SINGLE;
params->super.thread_count = 1;
params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE;
params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST;
params->super.max_outstanding = 0;
params->super.warmup_iter = 10000;
params->super.alignment = ucs_get_page_size();
params->super.max_iter = 1000000l;
params->super.max_time = 0.0;
params->super.report_interval = 1.0;
params->super.percentile_rank = 50.0;
params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE;
params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW;
params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
params->super.uct.am_hdr_size = 8;
params->super.send_mem_type = UCS_MEMORY_TYPE_HOST;
params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST;
params->super.msg_size_cnt = 1;
params->super.iov_stride = 0;
params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
params->super.ucp.am_hdr_size = 0;
strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE);
strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE);
params->super.msg_size_list = calloc(params->super.msg_size_cnt,
sizeof(*params->super.msg_size_list));
if (params->super.msg_size_list == NULL) {
return UCS_ERR_NO_MEMORY;
}
params->super.msg_size_list[0] = 8;
params->test_id = TEST_ID_UNDEFINED;
return UCS_OK;
}
static unsigned sock_rte_group_size(void *rte_group)
{
sock_rte_group_t *group = rte_group;
return group->size;
}
static unsigned sock_rte_group_index(void *rte_group)
{
sock_rte_group_t *group = rte_group;
return group->is_server ? 0 : 1;
}
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
#pragma omp barrier
#pragma omp master
{
sock_rte_group_t *group = rte_group;
if (group->size > 1) {
const unsigned magic = 0xdeadbeef;
unsigned snc;
snc = magic;
safe_send(group->sendfd, &snc, sizeof(unsigned), progress, arg);
snc = 0;
if (safe_recv(group->recvfd, &snc, sizeof(unsigned), progress, arg) == 0) {
ucs_assert(snc == magic);
}
}
}
#pragma omp barrier
}
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
int iovcnt, void **req)
{
sock_rte_group_t *group = rte_group;
size_t size;
int i;
size = 0;
for (i = 0; i < iovcnt; ++i) {
size += iovec[i].iov_len;
}
safe_send(group->sendfd, &size, sizeof(size), NULL, NULL);
for (i = 0; i < iovcnt; ++i) {
safe_send(group->sendfd, iovec[i].iov_base, iovec[i].iov_len, NULL,
NULL);
}
}
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
size_t max, void *req)
{
sock_rte_group_t *group = rte_group;
size_t size;
if (src != group->peer) {
return;
}
safe_recv(group->recvfd, &size, sizeof(size), NULL, NULL);
ucs_assert_always(size <= max);
safe_recv(group->recvfd, buffer, size, NULL, NULL);
}
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final, int is_multi_thread)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final, ctx->server_addr == NULL, is_multi_thread);
}
static ucx_perf_rte_t sock_rte = {
.group_size = sock_rte_group_size,
.group_index = sock_rte_group_index,
.barrier = sock_rte_barrier,
.post_vec = sock_rte_post_vec,
.recv = sock_rte_recv,
.exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function,
.report = sock_rte_report,
};
static ucs_status_t setup_sock_rte_loopback(struct perftest_context *ctx)
{
int connfds[2];
int ret;
ctx->flags |= TEST_FLAG_PRINT_TEST | TEST_FLAG_PRINT_RESULTS;
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, connfds);
if (ret < 0) {
ucs_error("socketpair() failed: %m");
return UCS_ERR_IO_ERROR;
}
ctx->sock_rte_group.peer = 0;
ctx->sock_rte_group.size = 1;
ctx->sock_rte_group.is_server = 1;
ctx->sock_rte_group.sendfd = connfds[0];
ctx->sock_rte_group.recvfd = connfds[1];
return UCS_OK;
}
static ucs_status_t setup_sock_rte_p2p(struct perftest_context *ctx)
{
struct sockaddr_in inaddr;
struct hostent *he;
ucs_status_t status;
int optval = 1;
int sockfd, connfd;
int ret;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
ucs_error("socket() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err;
}
if (ctx->server_addr == NULL) {
status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
&optval, sizeof(optval));
if (status != UCS_OK) {
goto err_close_sockfd;
}
inaddr.sin_family = AF_INET;
inaddr.sin_port = htons(ctx->port);
inaddr.sin_addr.s_addr = INADDR_ANY;
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("bind() failed: %m");
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
ret = listen(sockfd, 10);
if (ret < 0) {
ucs_error("listen() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
printf("Waiting for connection...\n");
/* Accept next connection */
connfd = accept(sockfd, NULL, NULL);
if (connfd < 0) {
ucs_error("accept() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
close(sockfd);
/* release the memory for the list of the message sizes allocated
* during the initialization of the default testing parameters */
free(ctx->params.super.msg_size_list);
ctx->params.super.msg_size_list = NULL;
ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
if (ctx->params.super.msg_size_cnt != 0) {
ctx->params.super.msg_size_list =
calloc(ctx->params.super.msg_size_cnt,
sizeof(*ctx->params.super.msg_size_list));
if (NULL == ctx->params.super.msg_size_list) {
status = UCS_ERR_NO_MEMORY;
goto err_close_connfd;
}
ret = safe_recv(connfd, ctx->params.super.msg_size_list,
sizeof(*ctx->params.super.msg_size_list) *
ctx->params.super.msg_size_cnt,
NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
}
ctx->sock_rte_group.sendfd = connfd;
ctx->sock_rte_group.recvfd = connfd;
ctx->sock_rte_group.peer = 1;
ctx->sock_rte_group.is_server = 1;
} else {
he = gethostbyname(ctx->server_addr);
if (he == NULL || he->h_addr_list == NULL) {
ucs_error("host %s not found: %s", ctx->server_addr,
hstrerror(h_errno));
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
inaddr.sin_family = he->h_addrtype;
inaddr.sin_port = htons(ctx->port);
ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("connect() failed: %m");
status = UCS_ERR_UNREACHABLE;
goto err_close_sockfd;
}
safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ctx->params.super.msg_size_cnt != 0) {
safe_send(sockfd, ctx->params.super.msg_size_list,
sizeof(*ctx->params.super.msg_size_list) *
ctx->params.super.msg_size_cnt,
NULL, NULL);
}
ctx->sock_rte_group.sendfd = sockfd;
ctx->sock_rte_group.recvfd = sockfd;
ctx->sock_rte_group.peer = 0;
ctx->sock_rte_group.is_server = 0;
}
ctx->sock_rte_group.size = 2;
if (ctx->sock_rte_group.is_server) {
ctx->flags |= TEST_FLAG_PRINT_TEST;
} else {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
return UCS_OK;
err_close_connfd:
close(connfd);
goto err;
err_close_sockfd:
close(sockfd);
err:
return status;
}
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
ucs_status_t status;
if (ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) {
        status = setup_sock_rte_loopback(ctx);
} else {
status = setup_sock_rte_p2p(ctx);
}
if (status != UCS_OK) {
return status;
}
ctx->params.super.rte_group = &ctx->sock_rte_group;
ctx->params.super.rte = &sock_rte;
ctx->params.super.report_arg = ctx;
return UCS_OK;
}
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
sock_rte_group_t *rte_group = &ctx->sock_rte_group;
close(rte_group->sendfd);
if (rte_group->sendfd != rte_group->recvfd) {
close(rte_group->recvfd);
}
return UCS_OK;
}
#if defined (HAVE_MPI)
static unsigned mpi_rte_group_size(void *rte_group)
{
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
return size;
}
static unsigned mpi_rte_group_index(void *rte_group)
{
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
return rank;
}
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
int group_size, my_rank, i;
MPI_Request *reqs;
int nreqs = 0;
int dummy;
int flag;
#pragma omp barrier
#pragma omp master
{
/*
* Naive non-blocking barrier implementation over send/recv, to call user
* progress while waiting for completion.
* Not using MPI_Ibarrier to be compatible with MPI-1.
*/
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &group_size);
/* allocate maximal possible number of requests */
reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);
if (my_rank == 0) {
/* root gathers "ping" from all other ranks */
for (i = 1; i < group_size; ++i) {
MPI_Irecv(&dummy, 0, MPI_INT,
i /* source */,
1 /* tag */,
MPI_COMM_WORLD,
&reqs[nreqs++]);
}
} else {
/* every non-root rank sends "ping" and waits for "pong" */
MPI_Send(&dummy, 0, MPI_INT,
0 /* dest */,
1 /* tag */,
MPI_COMM_WORLD);
MPI_Irecv(&dummy, 0, MPI_INT,
0 /* source */,
2 /* tag */,
MPI_COMM_WORLD,
&reqs[nreqs++]);
}
/* Waiting for receive requests */
do {
MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
progress(arg);
} while (!flag);
if (my_rank == 0) {
/* root sends "pong" to all ranks */
for (i = 1; i < group_size; ++i) {
MPI_Send(&dummy, 0, MPI_INT,
i /* dest */,
2 /* tag */,
MPI_COMM_WORLD);
}
}
}
#pragma omp barrier
}
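/*
 * Message flow of the barrier above (ranks 0..P-1):
 *   rank i>0 : Send "ping" (tag 1) to rank 0, post Irecv for "pong" (tag 2)
 *   rank 0   : post Irecvs for the P-1 pings, then send "pong" to every rank
 * All receives are completed through the MPI_Testall polling loop so the
 * user-supplied progress callback keeps running; a blocking MPI_Barrier
 * would stall UCX progress while ranks wait inside MPI.
 */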
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
int iovcnt, void **req)
{
int group_size;
int my_rank;
int dest, i;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &group_size);
for (dest = 0; dest < group_size; ++dest) {
if (dest != rte_peer_index(group_size, my_rank)) {
continue;
}
for (i = 0; i < iovcnt; ++i) {
MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
i == (iovcnt - 1), /* Send last iov with tag == 1 */
MPI_COMM_WORLD);
}
}
*req = (void*)(uintptr_t)1;
}
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
void *req)
{
MPI_Status status;
int my_rank, size;
size_t offset;
int count;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (src != rte_peer_index(size, my_rank)) {
return;
}
offset = 0;
do {
ucs_assert_always(offset < max);
MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_BYTE, &count);
offset += count;
} while (status.MPI_TAG != 1);
}
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final, int is_multi_thread)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final, ctx->server_addr == NULL, is_multi_thread);
}
#elif defined (HAVE_RTE)
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
#pragma omp barrier
#pragma omp master
{
rte_group_t group = (rte_group_t)rte_group;
int rc;
rc = rte_barrier(group);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_barrier");
}
}
#pragma omp barrier
}
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
int iovcnt, void **req)
{
rte_group_t group = (rte_group_t)rte_group;
rte_srs_session_t session;
rte_iovec_t *r_vec;
int i, rc;
rc = rte_srs_session_create(group, 0, &session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_session_create");
}
r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
if (r_vec == NULL) {
return;
}
for (i = 0; i < iovcnt; ++i) {
r_vec[i].iov_base = iovec[i].iov_base;
r_vec[i].type = rte_datatype_uint8_t;
r_vec[i].count = iovec[i].iov_len;
}
rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_set_data");
}
*req = session;
free(r_vec);
}
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
size_t max, void *req)
{
rte_group_t group = (rte_group_t)rte_group;
rte_srs_session_t session = (rte_srs_session_t)req;
void *rte_buffer = NULL;
rte_iovec_t r_vec;
uint32_t offset;
int size;
int rc;
rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
"KEY_PERF", &rte_buffer, &size);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_get_data");
return;
}
r_vec.iov_base = buffer;
r_vec.type = rte_datatype_uint8_t;
r_vec.count = max;
offset = 0;
rte_unpack(&r_vec, rte_buffer, &offset);
rc = rte_srs_session_destroy(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_session_destroy");
}
free(rte_buffer);
}
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
rte_srs_session_t session = (rte_srs_session_t)req;
int rc;
rc = rte_srs_exchange_data(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_exchange_data");
}
}
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final, int is_multi_thread)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final, ctx->server_addr == NULL, is_multi_thread);
}
static ucx_perf_rte_t ext_rte = {
.group_size = ext_rte_group_size,
.group_index = ext_rte_group_index,
.barrier = ext_rte_barrier,
.report = ext_rte_report,
.post_vec = ext_rte_post_vec,
.recv = ext_rte_recv,
.exchange_vec = ext_rte_exchange_vec,
};
#endif
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
#if defined (HAVE_MPI)
static ucx_perf_rte_t mpi_rte = {
.group_size = mpi_rte_group_size,
.group_index = mpi_rte_group_index,
.barrier = mpi_rte_barrier,
.post_vec = mpi_rte_post_vec,
.recv = mpi_rte_recv,
.exchange_vec = (void*)ucs_empty_function,
.report = mpi_rte_report,
};
int size, rank;
ucs_trace_func("");
MPI_Comm_size(MPI_COMM_WORLD, &size);
if ((ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) &&
(size != 1)) {
ucs_error("This test should be run with 1 process "
"in loopback case (actual: %d)", size);
return UCS_ERR_INVALID_PARAM;
}
if (!(ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) &&
(size != 2)) {
ucs_error("This test should be run with exactly 2 processes "
"in p2p case (actual: %d)", size);
return UCS_ERR_INVALID_PARAM;
}
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* Let the last rank print the results */
if (rank == (size - 1)) {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.super.rte_group = NULL;
ctx->params.super.rte = &mpi_rte;
ctx->params.super.report_arg = ctx;
#elif defined (HAVE_RTE)
    rte_group_t group;
    ucs_trace_func("");
    rte_init(NULL, NULL, &group);
/* Let the last rank print the results */
if (rte_group_rank(group) == (rte_group_size(group) - 1)) {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.super.rte_group = group;
ctx->params.super.rte = &ext_rte;
ctx->params.super.report_arg = ctx;
#endif
return UCS_OK;
}
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#ifdef HAVE_RTE
rte_finalize();
#endif
return UCS_OK;
}
static ucs_status_t check_system(struct perftest_context *ctx)
{
ucs_sys_cpuset_t cpuset;
unsigned i, count, nr_cpus;
int ret;
ucs_trace_func("");
ret = ucs_sys_get_num_cpus();
if (ret < 0) {
return UCS_ERR_INVALID_PARAM;
}
nr_cpus = ret;
memset(&cpuset, 0, sizeof(cpuset));
if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
for (i = 0; i < ctx->num_cpus; i++) {
if (ctx->cpus[i] >= nr_cpus) {
ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1);
return UCS_ERR_INVALID_PARAM;
}
}
for (i = 0; i < ctx->num_cpus; i++) {
CPU_SET(ctx->cpus[i], &cpuset);
}
ret = ucs_sys_setaffinity(&cpuset);
if (ret) {
ucs_warn("sched_setaffinity() failed: %m");
return UCS_ERR_INVALID_PARAM;
}
} else {
ret = ucs_sys_getaffinity(&cpuset);
if (ret) {
ucs_warn("sched_getaffinity() failed: %m");
return UCS_ERR_INVALID_PARAM;
}
count = 0;
for (i = 0; i < CPU_SETSIZE; ++i) {
if (CPU_ISSET(i, &cpuset)) {
++count;
}
}
if (count > 2) {
ucs_warn("CPU affinity is not set (bound to %u cpus)."
" Performance may be impacted.", count);
}
}
return UCS_OK;
}
int main(int argc, char **argv)
{
struct perftest_context ctx;
ucs_status_t status;
int mpi_initialized;
int mpi_rte;
int ret;
#ifdef HAVE_MPI
int provided;
mpi_initialized = !isatty(0) &&
/* Using MPI_THREAD_FUNNELED since ucx_perftest supports
* using multiple threads when only the main one makes
* MPI calls (which is also suitable for a single threaded
* run).
* MPI_THREAD_FUNNELED:
* The process may be multi-threaded, but only the main
* thread will make MPI calls (all MPI calls are funneled
* to the main thread). */
(MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0);
if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) {
printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n",
provided);
ret = -1;
goto out;
}
#else
mpi_initialized = 0;
#endif
/* Parse command line */
status = parse_opts(&ctx, mpi_initialized, argc, argv);
if (status != UCS_OK) {
ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
goto out_msg_size_list;
}
#ifdef __COVERITY__
/* coverity[dont_call] */
mpi_rte = rand(); /* Shut up deadcode error */
#endif
if (ctx.mpi) {
mpi_rte = 1;
} else {
#ifdef HAVE_RTE
mpi_rte = 1;
#else
mpi_rte = 0;
#endif
}
status = check_system(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out_msg_size_list;
}
/* Create RTE */
status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out_msg_size_list;
}
/* Run the test */
status = run_test(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out_cleanup_rte;
}
ret = 0;
out_cleanup_rte:
(mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out_msg_size_list:
free(ctx.params.super.msg_size_list);
#ifdef HAVE_MPI
out:
#endif
if (mpi_initialized) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
}
return ret;
}
|
lu_par_loop.c | #include "trace.h"
#include "common.h"
#include <omp.h>
/* This routine performs the LU factorization of a square matrix by
block-columns */
void lu_par_loop(Matrix A, info_type info){
int i, j;
/* Initialize the tracing system */
trace_init();
for(i=0; i<info.NB; i++){
/* Do the Panel operation on column i */
panel(A[i], i, info);
/* Parallelize this loop */
#pragma omp parallel for
for(j = i + 1; j < info.NB; j++){
/* Update column j with respect to the result of panel(A, i) */
update(A[i], A[j], i, j, info);
}
}
/* This routine applies permutations resulting from numerical
pivoting. It has to be executed sequentially. */
backperm(A, info);
/* Write the trace in file (ignore) */
trace_dump("trace_par_loop.svg");
return;
}
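/* Note on scheduling: at step i only columns i+1..NB-1 are updated, so the
   parallel loop shrinks every outer iteration and per-column work is uneven.
   A dynamic schedule is a common variant (a sketch, not part of the original
   routine):

     #pragma omp parallel for schedule(dynamic)
     for(j = i + 1; j < info.NB; j++)
       update(A[i], A[j], i, j, info);
*/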
|
rankmatrix.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include "../error/error.h"
/**
* Initialize a new dense rank matrix
*
* @param mtx a valid pointer to an uninitialized sptMatrix variable
* @param nrows the number of rows
* @param ncols the number of columns
*
* The memory layout of this dense matrix is a flat 2D array, with `ncols`
* rounded up to multiples of 8
*/
int sptNewRankMatrix(sptRankMatrix *mtx, sptIndex const nrows, sptElementIndex const ncols) {
mtx->nrows = nrows;
mtx->ncols = ncols;
mtx->cap = nrows != 0 ? nrows : 1;
mtx->stride = ((ncols-1)/8+1)*8;
#ifdef _ISOC11_SOURCE
mtx->values = aligned_alloc(8 * sizeof (sptValue), mtx->cap * mtx->stride * sizeof (sptValue));
#elif _POSIX_C_SOURCE >= 200112L
{
int result = posix_memalign((void **) &mtx->values, 8 * sizeof (sptValue), mtx->cap * mtx->stride * sizeof (sptValue));
if(result != 0) {
mtx->values = NULL;
}
}
#else
mtx->values = malloc(mtx->cap * mtx->stride * sizeof (sptValue));
#endif
spt_CheckOSError(!mtx->values, "RankMtx New");
return 0;
}
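/*
 * Element (i, j) of a rank matrix lives at values[i * stride + j], where
 * `stride` is ncols rounded up to a multiple of 8 so each row starts on an
 * aligned boundary. A minimal usage sketch (hypothetical sizes):
 *
 *   sptRankMatrix m;
 *   sptNewRankMatrix(&m, 100, 16);      // stride == 16 here
 *   m.values[3 * m.stride + 5] = 1.0;   // row 3, column 5
 *   sptFreeRankMatrix(&m);
 */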
/**
 * Fill a matrix with random numbers
 *
 * @param mtx a pointer to an initialized matrix
 * @param nrows fill the specified number of rows
 * @param ncols fill the specified number of columns
 *
 * The matrix is filled with uniformly distributed pseudorandom numbers in
 * [0, 1]. The random numbers have a precision of 31 bits out of 51 bits.
*/
int sptRandomizeRankMatrix(sptRankMatrix *mtx, sptIndex const nrows, sptElementIndex const ncols)
{
srand(time(NULL));
for(sptIndex i=0; i<nrows; ++i)
for(sptElementIndex j=0; j<ncols; ++j) {
mtx->values[i * mtx->stride + j] = sptRandomValue();
}
return 0;
}
/**
 * Fill an existing dense rank matrix with a specified constant
*
* @param mtx a pointer to a valid matrix
* @param val a given value constant
*
*/
int sptConstantRankMatrix(sptRankMatrix *mtx, sptValue const val) {
for(sptIndex i=0; i<mtx->nrows; ++i)
for(sptElementIndex j=0; j<mtx->ncols; ++j)
mtx->values[i * mtx->stride + j] = val;
return 0;
}
/**
* Shuffle matrix row indices.
*
* @param[in] mtx matrix to be shuffled
* @param[out] map_inds is the renumbering mapping
*
*/
void sptRankMatrixInverseShuffleIndices(sptRankMatrix *mtx, sptIndex * mode_map_inds) {
/* Renumber matrix rows */
sptIndex new_i;
    sptValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (sptValue));
    if(tmp_values == NULL) {
        return;
    }
for(sptIndex i=0; i<mtx->nrows; ++i) {
new_i = mode_map_inds[i];
for(sptElementIndex j=0; j<mtx->ncols; ++j) {
tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
}
}
free(mtx->values);
mtx->values = tmp_values;
}
/**
* Release the memory buffer a dense rank matrix is holding
*
* @param mtx a pointer to a valid matrix
*
 * By using `sptFreeRankMatrix`, a valid matrix becomes uninitialized and
 * should not be used again until it is re-initialized
*/
void sptFreeRankMatrix(sptRankMatrix *mtx) {
free(mtx->values);
mtx->nrows = 0;
mtx->ncols = 0;
mtx->cap = 0;
mtx->stride = 0;
}
/* mats (aTa) only stores upper triangle elements. */
int sptRankMatrixDotMulSeqTriangle(sptIndex const mode, sptIndex const nmodes, sptRankMatrix ** mats)
{
sptIndex const nrows = mats[0]->nrows;
sptElementIndex const ncols = mats[0]->ncols;
sptElementIndex const stride = mats[0]->stride;
for(sptIndex m=1; m<nmodes+1; ++m) {
assert(mats[m]->ncols == ncols);
assert(mats[m]->nrows == nrows);
}
sptValue * ovals = mats[nmodes]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
ovals[j * stride + i] = 1.0;
}
}
for(sptIndex m=1; m < nmodes; ++m) {
sptIndex const pm = (mode + m) % nmodes;
sptValue const * vals = mats[pm]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=i; j < ncols; ++j) {
ovals[i * stride + j] *= vals[i * stride + j];
}
}
}
/* Copy upper triangle to lower part */
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < i; ++j) {
ovals[i * stride + j] = ovals[j * stride + i];
}
}
return 0;
}
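/*
 * The routine above accumulates, into mats[nmodes], the elementwise
 * (Hadamard) product of the Gram matrices of every factor matrix except the
 * one for `mode` -- the left-hand side of the CP-ALS normal equations. Only
 * the upper triangle is multiplied; the final loop mirrors it into the lower
 * part because the result is symmetric.
 */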
// Row-major
int sptRankMatrix2Norm(sptRankMatrix * const A, sptValue * const lambda)
{
sptIndex const nrows = A->nrows;
sptElementIndex const ncols = A->ncols;
sptElementIndex const stride = A->stride;
sptValue * const vals = A->values;
sptValue * buffer_lambda;
int nthreads = 1;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptElementIndex j=0; j < ncols; ++j) {
lambda[j] = 0.0;
}
#ifdef PARTI_USE_OPENMP
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
buffer_lambda = (sptValue *)malloc(nthreads * ncols * sizeof(sptValue));
#endif
#ifdef PARTI_USE_OPENMP
#pragma omp parallel
{
#pragma omp for schedule(static)
for(sptNnzIndex j=0; j < (sptNnzIndex)nthreads * ncols; ++j)
buffer_lambda[j] = 0.0;
int const tid = omp_get_thread_num();
int const nthreads = omp_get_num_threads();
sptValue * loc_lambda = buffer_lambda + tid * ncols;
#pragma omp for
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
loc_lambda[j] += vals[i*stride + j] * vals[i*stride + j];
}
}
#pragma omp for schedule(static)
for(sptElementIndex j=0; j < ncols; ++j) {
for(sptIndex i=0; i < (sptIndex)nthreads; ++i) {
lambda[j] += buffer_lambda[i*ncols + j];
}
}
} /* end parallel pragma */
#else
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
lambda[j] += vals[i*stride + j] * vals[i*stride + j];
}
}
#endif
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptElementIndex j=0; j < ncols; ++j) {
lambda[j] = sqrt(lambda[j]);
}
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
vals[i*stride + j] /= lambda[j];
}
}
#ifdef PARTI_USE_OPENMP
free(buffer_lambda);
#endif
return 0;
}
// Row-major
int sptRankMatrixMaxNorm(sptRankMatrix * const A, sptValue * const lambda)
{
sptIndex const nrows = A->nrows;
sptElementIndex const ncols = A->ncols;
sptElementIndex const stride = A->stride;
sptValue * const vals = A->values;
sptValue * buffer_lambda;
int nthreads = 1;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptElementIndex j=0; j < ncols; ++j) {
lambda[j] = 0.0;
}
#ifdef PARTI_USE_OPENMP
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
buffer_lambda = (sptValue *)malloc(nthreads * ncols * sizeof(sptValue));
#endif
#ifdef PARTI_USE_OPENMP
#pragma omp parallel
{
#pragma omp for schedule(static)
for(sptNnzIndex j=0; j < (sptNnzIndex)nthreads * ncols; ++j)
buffer_lambda[j] = 0.0;
int const tid = omp_get_thread_num();
int const nthreads = omp_get_num_threads();
sptValue * loc_lambda = buffer_lambda + tid * ncols;
#pragma omp for
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
if(vals[i*stride + j] > loc_lambda[j])
loc_lambda[j] = vals[i*stride + j];
}
}
#pragma omp for schedule(static)
for(sptElementIndex j=0; j < ncols; ++j) {
for(sptIndex i=0; i < (sptIndex)nthreads; ++i) {
if(buffer_lambda[i*ncols + j] > lambda[j])
lambda[j] = buffer_lambda[i*ncols + j];
}
}
} /* end parallel pragma */
#else
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
if(vals[i*stride + j] > lambda[j])
lambda[j] = vals[i*stride + j];
}
}
#endif
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(sptElementIndex j=0; j < ncols; ++j) {
if(lambda[j] < 1)
lambda[j] = 1;
}
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
for(sptIndex i=0; i < nrows; ++i) {
for(sptElementIndex j=0; j < ncols; ++j) {
vals[i*stride + j] /= lambda[j];
}
}
#ifdef PARTI_USE_OPENMP
free(buffer_lambda);
#endif
return 0;
}
void GetRankFinalLambda(
sptElementIndex const rank,
sptIndex const nmodes,
sptRankMatrix ** mats,
sptValue * const lambda)
{
sptValue * tmp_lambda = (sptValue *) malloc(rank * sizeof(*tmp_lambda));
for(sptIndex m=0; m < nmodes; ++m) {
sptRankMatrix2Norm(mats[m], tmp_lambda);
for(sptElementIndex r=0; r < rank; ++r) {
lambda[r] *= tmp_lambda[r];
}
}
free(tmp_lambda);
}
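/*
 * GetRankFinalLambda multiplies into lambda, so the caller must initialize it
 * first. A typical sequence, as a sketch (hypothetical rank R; error handling
 * omitted):
 *
 *   sptValue lambda[R];
 *   for(sptElementIndex r = 0; r < R; ++r) lambda[r] = 1.0;
 *   GetRankFinalLambda(R, nmodes, mats, lambda);  // lambda[r] *= column norms
 */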
|
ccsd_t.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
/*
* 4 * w + w.transpose(1,2,0) + w.transpose(2,0,1)
* - 2 * w.transpose(2,1,0) - 2 * w.transpose(0,2,1)
* - 2 * w.transpose(1,0,2)
*/
static void permute3(double *out, double *w, int n)
{
int nn = n * n;
int i, j, k;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
out[i*nn+j*n+k] = w[i*nn+j*n+k] * 4
+ w[j*nn+k*n+i]
+ w[k*nn+i*n+j]
- w[k*nn+j*n+i] * 2
- w[i*nn+k*n+j] * 2
- w[j*nn+i*n+k] * 2;
} } }
}
/*
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= w
*/
static void get_wv(double *w, double *v, double *vooo, double *vv_op,
double *t1T, double *t2T,
int nocc, int nvir, int a, int b, int c)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const int nooo = nocc * noo;
const int nvoo = nvir * noo;
double thalf[nvir];
int i, j, k, n;
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, w, &noo);
dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
&D1, w, &nocc);
for (i = 0; i < nocc; i++) {
thalf[i] = t1T[c*nocc+i] * .5;
}
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
v[n] = w[n] + vv_op[i*nmo+j] * thalf[k];
} } }
}
static void sym_wv(double *w, double *v, double *vooo, double *vv_op,
double *t1T, double *t2T,
int nocc, int nvir, int a, int b, int c, int nirrep,
int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym)
{
const double D0 = 0;
const double D1 = 1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const int nooo = nocc * noo;
const int nvoo = nvir * noo;
double thalf[nvir];
int a_irrep = orbsym[nocc+a];
int b_irrep = orbsym[nocc+b];
int c_irrep = orbsym[nocc+c];
int ab_irrep = a_irrep ^ b_irrep;
int bc_irrep = c_irrep ^ b_irrep;
int i, j, k, n;
int fr, f0, f1, df, mr, m0, m1, dm, mk0;
int ir, i0, i1, di, kr, k0, k1, dk, jr;
int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
double *buf = v;
double *pt2T;
memset(w, 0, sizeof(double)*nooo);
/* symmetry adapted
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) */
pt2T = t2T + c * nvoo;
for (ir = 0; ir < nirrep; ir++) {
i0 = o_ir_loc[ir];
i1 = o_ir_loc[ir+1];
di = i1 - i0;
if (di > 0) {
fr = ir ^ ab_irrep;
f0 = v_ir_loc[fr];
f1 = v_ir_loc[fr+1];
df = f1 - f0;
if (df > 0) {
jkr = fr ^ c_irrep;
jk0 = oo_ir_loc[jkr];
jk1 = oo_ir_loc[jkr+1];
djk = jk1 - jk0;
if (djk > 0) {
dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
&D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
&D0, buf, &djk);
for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (jr = 0; jr < nirrep; jr++) {
kr = jkr ^ jr;
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[i*noo+j*nocc+k] += buf[n];
} }
} }
}
}
}
}
/* symmetry adapted
* w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
pt2T = t2T + c * nvoo + b * noo;
vooo += a * nooo;
mk0 = oo_ir_loc[bc_irrep];
for (mr = 0; mr < nirrep; mr++) {
m0 = o_ir_loc[mr];
m1 = o_ir_loc[mr+1];
dm = m1 - m0;
if (dm > 0) {
kr = mr ^ bc_irrep;
k0 = o_ir_loc[kr];
k1 = o_ir_loc[kr+1];
dk = k1 - k0;
if (dk > 0) {
ijr = mr ^ a_irrep;
ij0 = oo_ir_loc[ijr];
ij1 = oo_ir_loc[ijr+1];
dij = ij1 - ij0;
if (dij > 0) {
dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
&D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
&D0, buf, &dk);
for (n = 0, ir = 0; ir < nirrep; ir++) {
jr = ijr ^ ir;
for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[i*noo+j*nocc+k] -= buf[n];
} }
} }
}
mk0 += dm * dk;
}
}
}
for (i = 0; i < nocc; i++) {
thalf[i] = t1T[c*nocc+i] * .5;
}
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
v[n] = w[n] + vv_op[i*nmo+j] * thalf[k];
} } }
}
/*
z0, z1, z2, z3, z4, z5 = z
et = numpy.einsum('ijk,ijk', z[0], w)
et+= numpy.einsum('ijk,ikj', z[1], w)
et+= numpy.einsum('ijk,jik', z[2], w)
et+= numpy.einsum('ijk,kij', z[3], w)
et+= numpy.einsum('ijk,jki', z[4], w)
et+= numpy.einsum('ijk,kji', z[5], w)
*/
static double permute_contract(double *z0, double *z1, double *z2, double *z3,
double *z4, double *z5, double *w, int n)
{
int nn = n * n;
int i, j, k;
double et = 0;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
et += z0[i*nn+j*n+k] * w[i*nn+j*n+k];
et += z1[i*nn+j*n+k] * w[i*nn+k*n+j];
et += z2[i*nn+j*n+k] * w[j*nn+i*n+k];
et += z3[i*nn+j*n+k] * w[k*nn+i*n+j];
et += z4[i*nn+j*n+k] * w[j*nn+k*n+i];
et += z5[i*nn+j*n+k] * w[k*nn+j*n+i];
} } }
return et;
}
static void get_denorm(double *d3, double *mo_energy, int nocc,
int a, int b, int c)
{
int i, j, k, n;
double abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
d3[n] = 1./(mo_energy[i] + mo_energy[j] + mo_energy[k] - abc);
} } }
}
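/*
 * d3 holds the standard (T) energy denominator
 *     1 / (e_i + e_j + e_k - e_a - e_b - e_c)
 * for every occupied triple (i, j, k) against the fixed virtual triple
 * (a, b, c).
 */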
static double contract6(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double *t1T, double *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
double *vooo, double *cache1, double **cache)
{
int nooo = nocc * nocc * nocc;
double *denorm = cache1;
double *v0 = denorm + nooo;
double *v1 = v0 + nooo;
double *v2 = v1 + nooo;
double *v3 = v2 + nooo;
double *v4 = v3 + nooo;
double *v5 = v4 + nooo;
double *w0 = v5 + nooo;
double *w1 = w0 + nooo;
double *w2 = w1 + nooo;
double *w3 = w2 + nooo;
double *w4 = w3 + nooo;
double *w5 = w4 + nooo;
double *z0 = w5 + nooo;
double *z1 = z0 + nooo;
double *z2 = z1 + nooo;
double *z3 = z2 + nooo;
double *z4 = z3 + nooo;
double *z5 = z4 + nooo;
int i;
if (nirrep == 1) {
get_wv(w0, v0, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c);
get_wv(w1, v1, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b);
get_wv(w2, v2, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c);
get_wv(w3, v3, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a);
get_wv(w4, v4, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b);
get_wv(w5, v5, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a);
} else {
sym_wv(w0, v0, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
sym_wv(w1, v1, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
sym_wv(w2, v2, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
sym_wv(w3, v3, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
sym_wv(w4, v4, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
sym_wv(w5, v5, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym);
}
permute3(z0, v0, nocc);
permute3(z1, v1, nocc);
permute3(z2, v2, nocc);
permute3(z3, v3, nocc);
permute3(z4, v4, nocc);
permute3(z5, v5, nocc);
get_denorm(denorm, mo_energy, nocc, a, b, c);
if (a == c) {
for (i = 0; i < nooo; i++) {
denorm[i] *= 1./6;
}
} else if (a == b || b == c) {
for (i = 0; i < nooo; i++) {
denorm[i] *= .5;
}
}
for (i = 0; i < nooo; i++) {
z0[i] *= denorm[i];
z1[i] *= denorm[i];
z2[i] *= denorm[i];
z3[i] *= denorm[i];
z4[i] *= denorm[i];
z5[i] *= denorm[i];
}
double et = 0;
et += permute_contract(z0, z1, z2, z3, z4, z5, w0, nocc);
et += permute_contract(z1, z0, z4, z5, z2, z3, w1, nocc);
et += permute_contract(z2, z3, z0, z1, z5, z4, w2, nocc);
et += permute_contract(z3, z2, z5, z4, z0, z1, w3, nocc);
et += permute_contract(z4, z5, z1, z0, z3, z2, w4, nocc);
et += permute_contract(z5, z4, z3, z2, z1, z0, w5, nocc);
return et;
}
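/*
 * The 1/6 and 1/2 factors applied to `denorm` above account for the
 * permutational symmetry of the virtual triple: the job list enumerates
 * a >= b >= c, while contract6 internally sums all six permutations of
 * (a, b, c). When all three indices coincide (a == c implies a == b == c)
 * all six permutations collapse into one, and when exactly two coincide each
 * distinct permutation appears twice, hence the downscaling.
 */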
double CCsd_t_contract(double *mo_energy, double *t1T, double *t2T, double *vooo,
int nocc, int nvir, int a0, int a1, int b0, int b1,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
double *cache_row_a, double *cache_col_a,
double *cache_row_b, double *cache_col_b)
{
size_t nov = nocc * (nocc+nvir);
int da = a1 - a0;
int db = b1 - b0;
struct CacheJob {
double *cache[6];
short a;
short b;
short c;
short _padding;
};
struct CacheJob *jobs = malloc(sizeof(struct CacheJob) * da*db*b1);
int a, b, c;
size_t m;
if (b1 <= a0) {
m = 0;
for (a = a0; a < a1; a++) {
for (b = b0; b < b1; b++) {
for (c = 0; c < b0; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
jobs[m].cache[5] = cache_col_b + nov*(db*(c) +b-b0);
}
for (c = b0; c <= b; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
jobs[m].cache[5] = cache_row_b + nov*(b1*(c-b0)+b );
}
} }
} else {
m = 0;
for (a = a0; a < a1; a++) {
for (b = a0; b <= a; b++) {
for (c = 0; c < a0; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
jobs[m].cache[4] = cache_col_a + nov*(da*(c)+a-a0);
jobs[m].cache[5] = cache_col_a + nov*(da*(c)+b-a0);
}
for (c = a0; c <= b; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
jobs[m].cache[4] = cache_row_a + nov*(a1*(c-a0)+a);
jobs[m].cache[5] = cache_row_a + nov*(a1*(c-a0)+b);
}
} }
}
double e_tot = 0;
#pragma omp parallel default(none) \
shared(m, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
v_ir_loc, oo_ir_loc, orbsym, vooo, jobs, e_tot) \
private(a, b, c)
{
size_t k;
double *cache1 = malloc(sizeof(double) * nocc*nocc*nocc*19);
double e = 0;
#pragma omp for schedule (dynamic, 32)
for (k = 0; k < m; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += contract6(nocc, nvir, a, b, c, mo_energy, t1T, t2T,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
vooo, cache1, jobs[k].cache);
}
free(cache1);
#pragma omp critical
e_tot += e;
}
        free(jobs);
        return e_tot;
}
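/*
 * The parallel region above uses a per-thread accumulator plus a critical
 * section, which is equivalent to an OpenMP reduction. A sketch of the
 * reduction form over the same job list:
 *
 *     #pragma omp parallel for schedule(dynamic, 32) reduction(+:e_tot)
 *     for (k = 0; k < m; k++)
 *         e_tot += contract6(...);
 *
 * The explicit form is kept because each thread also owns a scratch buffer
 * (cache1) that is allocated once and reused across all of its jobs.
 */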
/*
* vvop[:,:,:,:nocc] = ovov.transpose(1,3,0,2) # irrep_sort last v
* vvop[:,:,:,nocc:] = ovvv.transpose(1,2,0,3) # irrep_sort o and last vv
*/
static void sort_transpose(double *vvop, double *ovov, double *ovvv,
int *optr, int *vptr,
int nocc, int nvir, int dv, int iv)
{
size_t nmo = nocc + nvir;
size_t nov = nocc * nvir;
size_t nop = nocc * nmo;
size_t nvov = dv * nov;
size_t nvv = nvir * (nvir+1) / 2;
int i, j, a, b, ab;
int ip, jp, ap, bp;
double *pvvop, *povov, *povvv;
vvop += iv * nvir * nop;
ovov += iv * nov;
ovvv += iv * nvv;
for (a = 0; a < nvir; a++) {
ap = vptr[a];
pvvop = vvop + ap * nop;
povov = ovov + a;
for (i = 0; i < nocc; i++) {
ip = optr[i];
for (j = 0; j < nocc; j++) {
jp = optr[j];
pvvop[ip*nmo+jp] = povov[i*nvov+j*nvir];
}
}
}
for (i = 0; i < nocc; i++) {
ip = optr[i];
pvvop = vvop + ip * nmo + nocc;
povvv = ovvv + i * dv * nvv;
for (ab = 0, a = 0; a < nvir; a++) {
ap = vptr[a];
for (b = 0; b <= a; b++, ab++) {
bp = vptr[b];
pvvop[ap*nop+bp] = povvv[ab];
pvvop[bp*nop+ap] = povvv[ab];
}
}
}
}
void CCsd_t_sort_transpose(double *vvop, double *ovov, double *ovvv,
int *orbsym, int nocc, int nvir, int dv)
{
int optr[nocc];
int vptr[nvir];
int i, k, ir;
for (k = 0, ir = 0; ir < 8; ir++) {
for (i = 0; i < nocc; i++) { if (orbsym[i] == ir) {
optr[i] = k;
k++;
} }
}
for (k = 0, ir = 0; ir < 8; ir++) {
for (i = 0; i < nvir; i++) { if (orbsym[nocc+i] == ir) {
vptr[i] = k;
k++;
} }
}
#pragma omp parallel default(none) \
shared(vvop, ovov, ovvv, nocc, nvir, dv, optr, vptr) \
private(i)
{
#pragma omp for schedule (dynamic)
for (i = 0; i < dv; i++) {
sort_transpose(vvop, ovov, ovvv, optr, vptr, nocc, nvir, dv, i);
}
}
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const MagickRealType x_shear,const MagickRealType y_shear,
% const MagickRealType width,const MagickRealType height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const MagickRealType x_shear,const MagickRealType y_shear,
const MagickRealType width,const MagickRealType height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The amount of rotation calculated to deskew the image is saved in the
% artifact "deskew:angle".
%
% If the artifact "deskew:auto-crop" is given the image will be automatically
% cropped of the excess background.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrix,
MatrixInfo *destination_matrix,const ssize_t sign,size_t *projection)
{
MatrixInfo
*swap;
register MatrixInfo
*p,
*q;
register ssize_t
x;
size_t
step;
p=source_matrix;
q=destination_matrix;
for (step=1; step < GetMatrixColumns(p); step*=2)
{
for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
element,
neighbor;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) GetMatrixRows(p); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
{
ssize_t
delta;
unsigned short
element,
neighbor;
if (GetMatrixElement(p,x,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
continue;
delta=(ssize_t) element-(ssize_t) neighbor;
sum+=delta*delta;
}
projection[GetMatrixColumns(p)+sign*x-1]=sum;
}
}
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MatrixInfo
*destination_matrix,
*source_matrix;
MagickBooleanType
status;
register ssize_t
i;
size_t
count,
width;
ssize_t
y;
unsigned char
byte;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
destination_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
if ((source_matrix == (MatrixInfo *) NULL) ||
(destination_matrix == (MatrixInfo *) NULL))
{
if (destination_matrix != (MatrixInfo *) NULL)
destination_matrix=DestroyMatrixInfo(destination_matrix);
if (source_matrix != (MatrixInfo *) NULL)
source_matrix=DestroyMatrixInfo(source_matrix);
return(MagickFalse);
}
if (NullMatrix(source_matrix) == MagickFalse)
{
destination_matrix=DestroyMatrixInfo(destination_matrix);
source_matrix=DestroyMatrixInfo(source_matrix);
return(MagickFalse);
}
for (i=0; i < 256; i++)
{
byte=(unsigned char) i;
for (count=0; byte != 0; byte>>=1)
count+=byte & 0x01;
bits[i]=(unsigned short) count;
}
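  /*
    bits[] is a byte-popcount table: bits[b] is the number of set bits in b,
    e.g. bits[0xB1] = 4. Each matrix element stored below therefore counts
    the foreground pixels in one 8-pixel byte of a thresholded row.
  */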
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrix,--i,y,&value);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrix,--i,y,&value);
}
}
RadonProjection(image,source_matrix,destination_matrix,-1,projection);
(void) NullMatrix(source_matrix);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrix,i++,y,&value);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrix,i++,y,&value);
}
}
RadonProjection(image,source_matrix,destination_matrix,1,projection);
image_view=DestroyCacheView(image_view);
destination_matrix=DestroyMatrixInfo(destination_matrix);
source_matrix=DestroyMatrixInfo(source_matrix);
  return(status);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickPixelPacket
background;
MagickRealType
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetMagickPixelPacket(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(p);
background.green+=QuantumScale*GetPixelGreen(p);
background.blue+=QuantumScale*GetPixelBlue(p);
background.opacity+=QuantumScale*GetPixelOpacity(p);
count++;
p++;
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
background.red/count);
image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
background.green/count);
image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
background.blue/count);
image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
background.opacity/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MaxTextExtent];
(void) FormatLocaleString(angle,MaxTextExtent,"%g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (IsMagickTrue(artifact) == MagickFalse)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
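/*
  The deskew angle follows from the Radon maximization above: `skew` is the
  offset of the strongest projection, and each matrix column packs 8 pixels,
  so the rotation is -atan(skew/(8*width)) converted to degrees.
*/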
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
rotate_image=(Image *) NULL;
rotate_view=(CacheView *) NULL;
switch (rotations)
{
case 0:
{
rotate_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case 2:
{
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
break;
}
case 1:
case 3:
{
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
break;
}
}
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
if (rotations != 0)
{
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
}
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict rotate_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels-=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*magick_restrict tile_indexes;
tile_indexes=indexes+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes-=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict rotate_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(rotate_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RotateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict rotate_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels+=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*magick_restrict tile_indexes;
tile_indexes=indexes+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes+=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
if (rotations != 0)
{
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
}
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image, creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
X shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*magick_restrict indexes,
*magick_restrict shear_indexes;
register PixelPacket
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=x_offset;
indexes+=x_offset;
displacement=degrees*(MagickRealType) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width;
indexes+=width;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) width; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (x_offset+width+step-i) > image->columns)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,XShearImageTag,progress,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
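/*
  Illustrative sketch (not part of the original source): the per-row
  arithmetic XShearImage() applies above.  A row y is displaced by
  degrees*(y-height/2.0) pixels; the integer part (plus one) becomes the
  whole-pixel shift 'step', and the fractional part becomes the blend
  weight 'area' passed to MagickPixelCompositeAreaBlend().  The helper
  name below is hypothetical.
*/
static inline void XShearDecompose(const MagickRealType degrees,
  const size_t height,const ssize_t y,ssize_t *step,MagickRealType *area)
{
  MagickRealType
    displacement;

  displacement=fabs((double) (degrees*(y-height/2.0)));
  *step=(ssize_t) floor((double) displacement);
  *area=displacement-(MagickRealType) *step;
  (*step)++;
}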
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage() shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image, creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
x;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
Y Shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,width,1)
#endif
for (x=0; x < (ssize_t) width; x++)
{
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*magick_restrict indexes,
*magick_restrict shear_indexes;
register ssize_t
i;
register PixelPacket
*magick_restrict p,
*magick_restrict q;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=y_offset;
indexes+=y_offset;
displacement=degrees*(MagickRealType) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height;
indexes+=height;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) height; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (y_offset+height+step-i) > image->rows)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
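/*
  Illustrative sketch (not part of the original source): YShearImage() is
  the column-wise analogue of XShearImage() above, and the two are meant to
  be applied in sequence, as ShearImage() below does.  The region variables
  here are hypothetical.

    status=XShearImage(image,shear_x,region_width,region_height,region_x,
      region_y,exception);
    if (status != MagickFalse)
      status=YShearImage(image,shear_y,region_width,region_height,region_x,
        region_y,exception);
*/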
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a sheared copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute image size.
*/
bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
shear_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->matte == MagickFalse)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
(shear_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->matte=image->matte;
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
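/*
  Illustrative sketch (not part of the original source): the canvas growth
  computed above.  An X shear of angle a widens the canvas by roughly
  |tan(a)|*rows pixels.  For a 100x50 image sheared by 30 degrees:

    fabs(tan(DegreesToRadians(30.0)))*50.0 ~= 28.9

  so bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+
  0.5) yields 100+29=129 columns before cropping.
*/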
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty triangles left
% over from shearing the image are filled with the background color defined
% by member 'background_color' of the image. ShearRotateImage()
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
size_t
height,
rotations,
shear_width,
width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
if (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute maximum bounds for 3 shear operations.
*/
width=integral_image->columns;
height=integral_image->rows;
bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
bounds.width+0.5);
bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
bounds.width-shear_width+2)/2.0+0.5);
bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
rotate_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
(rotate_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
(rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
bounds.height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->matte=image->matte;
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
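/*
  Illustrative sketch (not part of the original source): the Paeth
  three-shear factorization that drives the X-Y-X sequence above.  A
  rotation by angle t decomposes as

    rotate(t) = x_shear(-tan(t/2)) . y_shear(sin(t)) . x_shear(-tan(t/2))

  which is why shear.x=-tan(angle/2) and shear.y=sin(angle) are computed
  once and then fed to XShearImage(), YShearImage(), and XShearImage() in
  turn.
*/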
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
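// Illustrative sketch (not part of the original header): recording the
// first unannotated pointer seen in a file through the caching operator[].
// 'recordUnannotatedPointer' is a hypothetical helper.
//
//   void recordUnannotatedPointer(FileNullabilityMap &Map, FileID File,
//                                 SourceLocation Loc, uint8_t Kind) {
//     FileNullability &FN = Map[File]; // repeated lookups hit the 1-slot cache
//     if (FN.PointerLoc.isInvalid()) {
//       FN.PointerLoc = Loc;
//       FN.PointerKind = Kind;
//     }
//   }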
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
if (getLangOpts().ModulesHideInternalLinkage)
return isVisible(Old) || New->isExternallyVisible();
return true;
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin
// The HLSL rewriter doesn't define a default matrix pack,
// so we must preserve the lack of annotations to avoid changing semantics.
bool HasDefaultMatrixPack = false;
// Uses of #pragma pack_matrix change the default pack.
bool DefaultMatrixPackRowMajor = false;
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
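// Illustrative sketch (not part of the original header): the protocol a
// '#pragma code_seg'-style handler would drive on these stacks.  The
// values here are hypothetical.
//
//   S.CodeSegStack.Act(Loc, Sema::PSK_Push_Set, "label", Lit); // push + set
//   ... declarations created here see CodeSegStack.CurrentValue ...
//   S.CodeSegStack.Act(Loc, Sema::PSK_Pop, "label", nullptr);  // restore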
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
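// Illustrative sketch (not part of the original header): the push/pop
// protocol used around parsing a declaration; the pool's contents are
// emitted or dropped by the caller after popping.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   ... access/deprecation diagnostics collect in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);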
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
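// Illustrative sketch (not part of the original header): temporarily
// entering another DeclContext, e.g. while synthesizing a member.
//
//   {
//     Sema::ContextRAII Switched(SemaRef, RD); // RD becomes CurContext;
//     ...                                      // diagnostics not delayed here
//   }                                          // destructor restores context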
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as a SIZE operator in MS-style inline
/// assembly (see UnevaluatedAbstract below).
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
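// Illustrative note (not part of the original header): the operand of
// 'sizeof(e)' is parsed in Unevaluated, a case label's expression in
// ConstantEvaluated, and an ordinary statement's expression in
// PotentiallyEvaluated.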
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
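// Illustrative sketch (not part of the original header): scoping the
// FP_CONTRACT state to a single compound statement.
//
//   {
//     Sema::FPContractStateRAII FPState(S); // saves S.FPFeatures.fp_contract
//     ... parse '{ ... }', which may change FP_CONTRACT via pragma ...
//   }                                       // destructor restores the state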
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
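// A minimal usage sketch for Diag (illustrative only; the diagnostic ID and
// the streamed values are hypothetical):
//
//   Diag(Loc, diag::err_incomplete_type) << SomeType << SomeRange;
//
// The temporary SemaDiagnosticBuilder emits the diagnostic (including any
// template instantiation stack) when it is destroyed at the end of the
// full-expression.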
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
unsigned deduceWeakPropertyFromType(QualType T) {
if ((getLangOpts().getGC() != LangOptions::NonGC &&
T.isObjCGCWeak()) ||
(getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_Weak))
return ObjCDeclSpec::DQ_PR_weak;
return 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
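// A condensed sketch of calling BuildFunctionType (illustrative only; it
// assumes an ASTContext 'Context' and a source location 'Loc' are in scope):
//
//   QualType Params[] = { Context.IntTy, Context.FloatTy };
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType FnTy =
//       BuildFunctionType(Context.IntTy, Params, Loc, DeclarationName(), EPI);
//   // On success, FnTy's unqualified type is a FunctionProtoType for
//   // 'int (int, float)'; on error, FnTy is a null QualType.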
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed)
return;
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
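// A minimal usage sketch of the variadic overload above (illustrative only;
// the diagnostic ID is hypothetical, and any extra arguments are forwarded
// to the diagnostic through BoundTypeDiagnoser):
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type_used))
//     return ExprError(); // T was incomplete; a diagnostic was emitted.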
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
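// A condensed sketch of consuming the classification above (illustrative
// only; the parser-side variables are hypothetical):
//
//   NameClassification C = ClassifyName(S, SS, II, NameLoc, Next,
//                                       /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case NC_Type:       /* use C.getType() */       break;
//   case NC_Expression: /* use C.getExpression() */ break;
//   case NC_Error:      /* recover */               break;
//   default: break;
//   }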
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// \#include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;
NamedDecl *Previous;
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return the new attribute if one was added,
/// otherwise null.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
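// For example (an illustrative sketch; each pair is considered on its own):
//   void f(int);  void f(double); // second f: Ovl_Overload (new signature)
//   void g(int);  void g(int);    // second g: Ovl_Match (exact signature match)
//   int  h;       void h(int);    // second h: Ovl_NonFunction (h is a variable)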
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
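// A condensed sketch of wiring a converter into the entry point above
// (illustrative only; the diagnostic ID is hypothetical and the remaining
// pure-virtual diagnose*/note* overrides are elided for brevity):
//
//   struct SizeDiagnoser : ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_array_size_not_integral) << T;
//     }
//     // ... other diagnose*/note* overrides follow the same shape ...
//   };
//   SizeDiagnoser Diagnoser;
//   ExprResult R = PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);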
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression E.
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base, Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// \brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
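// A minimal lookup sketch using the kinds above, from inside a Sema member
// (illustrative only; 'Name', 'NameLoc', and 'CurScope' are hypothetical):
//
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   LookupName(R, CurScope);
//   if (R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//     // ... use D ...
//   }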
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
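// For orientation (illustrative C++ only; '_x' is a hypothetical suffix),
// the lookup outcomes above correspond to user-defined-literal operator
// declarations shaped like:
//
//   RetTy operator"" _x(unsigned long long);      // LOLR_Cooked
//   RetTy operator"" _x(const char *);            // LOLR_Raw
//   template <char...> RetTy operator"" _x();     // LOLR_Template
//   template <typename CharT, CharT...>
//   RetTy operator"" _x();                        // LOLR_StringTemplate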
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
/// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
///
/// The boolean value will be true to indicate that the namespace was loaded
/// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
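// A hedged usage sketch: checking for a prior declaration before declaring
// 'Name' (SemaRef, CurScope, Name, and Loc are assumed caller state):
//
//   NamedDecl *Prev = SemaRef.LookupSingleName(CurScope, Name, Loc,
//                                              Sema::LookupOrdinaryName,
//                                              Sema::ForRedeclaration);
//   if (Prev) { /* compare the new declaration against Prev */ }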
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
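// Illustrative only: a typical CorrectTypo call for an undeclared
// identifier. 'CCC' is a caller-supplied CorrectionCandidateCallback that
// filters candidates, and 'TypoDiag' is a caller-built PartialDiagnostic;
// both are assumptions, not part of this API:
//
//   TypoCorrection TC = SemaRef.CorrectTypo(
//       DeclarationNameInfo(Name, Loc), Sema::LookupOrdinaryName, CurScope,
//       /*SS=*/nullptr, std::move(CCC), Sema::CTK_ErrorRecovery);
//   if (TC)
//     SemaRef.diagnoseTypo(TC, TypoDiag);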
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
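// A minimal sketch of the Filter contract described above (with a
// hypothetical 'isUsableHere' predicate): returning ExprError() rejects one
// combination of corrections and asks for the next one to be tried.
//
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [&](Expr *Candidate) -> ExprResult {
//         if (isUsableHere(Candidate))
//           return Candidate;
//         return ExprError(); // reject; try the next combination
//       });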
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
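// A hedged caller-side sketch (names assumed): applying '_Nullable' to a
// type. Note the inverted return convention documented above: 'true' means
// the specifier could NOT be applied.
//
//   QualType T = PointerTy; // some pointer type from the caller
//   if (!SemaRef.checkNullabilityTypeSpecifier(
//           T, NullabilityKind::Nullable, NullabilityLoc,
//           /*isContextSensitive=*/false))
//     ; // T has been updated to carry the nullability specifier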
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration exactly matches that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief Returns instance or factory methods in the global method pool for
/// the given selector. If no such method is found, or only one method is
/// found, the function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
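// Illustrative glue (caller context assumed): parser callbacks wrap a
// condition expression in a full-expression before handing it to a
// statement action such as ActOnIfStmt below.
//
//   Sema::FullExprArg Cond = SemaRef.MakeFullExpr(CondExpr, IfLoc);
//   StmtResult If = SemaRef.ActOnIfStmt(IfLoc, Cond, /*CondVar=*/nullptr,
//                                       ThenStmt, ElseLoc, ElseStmt);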
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
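// Usage sketch: the RAII object brackets ActOnStartOfCompoundStmt and
// ActOnFinishOfCompoundStmt around building a compound statement (the
// surrounding names are assumptions):
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(SemaRef);
//     StmtResult CS = SemaRef.ActOnCompoundStmt(LBrace, RBrace, Stmts,
//                                               /*isStmtExpr=*/false);
//   }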
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
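// Hedged example: probing whether a range-based for statement could be
// built, without committing to any irreversible actions (names assumed
// from the caller):
//
//   StmtResult Probe = SemaRef.ActOnCXXForRangeStmt(
//       ForLoc, LoopVarStmt, ColonLoc, Collection, RParenLoc,
//       Sema::BFRK_Check);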
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
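// A minimal sketch of the "query only" mode described above: with
// BuildAndDiagnose=false, nothing is captured and no diagnostics are
// emitted; the call just reports whether the capture would succeed
// (Var and Loc are assumed caller state).
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = SemaRef.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);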
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
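// Illustrative: passing the extra arguments so BuildMemberReferenceExpr can
// reinvoke member access with '->' instead of '.' on failure, as described
// above (CurScope, Member, and the other names are caller assumptions):
//
//   Sema::ActOnMemberAccessExtraArgs Extra = {CurScope, Member, ObjCImpDecl};
//   ExprResult ME = SemaRef.BuildMemberReferenceExpr(
//       Base, Base->getType(), OpLoc, /*IsArrow=*/false, SS, TemplateKWLoc,
//       /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs, &Extra);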
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
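// Sketch of filling one '.ident' component for BuildBuiltinOffsetOf (the
// identifier and locations are assumed caller state):
//
//   Sema::OffsetOfComponent Comp;
//   Comp.isBrackets = false;        // '.ident', not '[expr]'
//   Comp.U.IdentInfo = MemberII;
//   Comp.LocStart = Comp.LocEnd = MemberLoc;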
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
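// Hedged usage: inside a template, an "if-exists" check on a dependent name
// yields IER_Dependent and must be re-checked at instantiation time (the
// surrounding names are assumptions):
//
//   Sema::IfExistsResult IER =
//       SemaRef.CheckMicrosoftIfExistsSymbol(CurScope, SS, TargetNameInfo);
//   if (IER == Sema::IER_Dependent)
//     ; // defer: build an MSDependentExistsStmt instead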
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
llvm::SmallVector<Decl*, 1> HLSLBuffers;
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl* getActiveHLSLBuffer() const;
void ActOnStartHLSLBufferView();
bool IsOnHLSLBufferView();
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
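// A sketch of how this class is typically driven (the locals named here are
// hypothetical; the real call sites are the Compute*ExceptionSpec functions
// below):
//
//   ImplicitExceptionSpecification ExceptSpec(SemaRef); // SemaRef: a Sema&
//   ExceptSpec.CalledDecl(Loc, BaseDtor);   // fold in each special member
//   ExceptSpec.CalledDecl(Loc, FieldDtor);  // the implicit function calls
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();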
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
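// For illustration of the C++11 rule this implements:
//
//   struct S { ~S(); };  // written without an exception specification;
//                        // treated as if it had the specification an
//                        // implicitly-declared ~S() would get (typically
//                        // noexcept(true)).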
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the point where the (naturally empty) cv-qualifier-seq
/// would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse a {dynamic,static,reinterpret,const}_cast expression.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
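// For illustration, the C++1z source forms these entry points handle
// (BuildEmptyCXXFoldExpr covers expansion over an empty pack):
//
//   template<typename ...Ts> auto sum(Ts ...ts) {
//     return (ts + ...);    // unary right fold
//   }
//   template<typename ...Ts> bool all(Ts ...ts) {
//     return (... && ts);   // unary left fold; an empty pack yields 'true'
//   }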
/// ActOnCXXThis - Parse the 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
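// A sketch of a typical use (the call site and qualifier value here are
// hypothetical):
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, /*CXXThisTypeQuals=*/0);
//     // ... parse or rebuild expressions that may mention 'this' ...
//   }  // the previous CXXThisTypeOverride is restored here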
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
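// The speculative-capture situation described above arises in code such as
// (for illustration):
//
//   struct S {
//     static void g(int);
//     void g(double);
//     void f() {
//       auto outer = [=](auto x) {
//         auto inner = [=] { g(x); };  // 'this' is needed only for the
//       };                             // specializations where g(double),
//     }                                // a non-static member, is chosen
//   };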
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
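// For illustration, the kind of source-level pseudo-functions these handle
// (assuming trait spellings the compiler supports):
//
//   struct T { T(const T&); };
//   bool A = __is_trivially_copyable(int);
//   bool B = __is_constructible(T, const T&);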
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' with ':'
/// are allowed. The bool value pointed to by this parameter is set to true
/// if the identifier is treated as if it were followed by ':' rather
/// than '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
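// For illustration of the [C++ 3.4.3p3] lookup behavior:
//
//   namespace N { struct C; void f(C); }
//   void N::f(C c) {}  // after 'N::f', 'C' is looked up in N's scope,
//                      // so it finds N::C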
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
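// For illustration of the [C++ 3.4.1p13] lookup behavior:
//
//   struct X { static int a; static int b; };
//   int X::a = 1;
//   int X::b = a;  // 'a' is looked up in the scope of X and finds X::a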
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture, applying any
/// implicit conversions (such as lvalue-to-rvalue conversion) when the
/// initializer is not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
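// For illustration, the C++14 init-captures this analyzes:
//
//   int i = 0;
//   auto byval = [x = i] { return x; };  // 'i' undergoes lvalue-to-rvalue
//   auto byref = [&r = i] { r = 42; };   // ByRef: initializes a reference,
//                                        // so no such conversion applies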
/// \brief Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init-capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; IR generation
/// produces the real body of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
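// The conversion whose "body" is filled in here is the one used in code
// such as (for illustration):
//
//   auto L = [](int x) { return x + 1; };  // captureless lambda
//   int (*fp)(int) = L;                    // invokes the conversion function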
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - Builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be
/// "NSNumber *", or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - Builds an ObjCBoxedExpr AST node for the
/// '@'-prefixed parenthesized expression. The type of the expression will
/// be "NSNumber *", "NSString *", or "NSValue *", depending on the type of
/// ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *", or a C structure with the 'objc_boxable' attribute.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check the validity of a C++ base class specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the overriding
/// function's exception spec is a subset of the base function's spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose when the 'override' keyword
/// was not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
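// For illustration of the C++11 [class.virtual]p4 rule:
//
//   struct B { virtual void f() final; };
//   struct D : B { void f(); };  // ill-formed: D::f overrides 'final' B::f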
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
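// For illustration, the kind of declaration CheckLiteralOperatorDeclaration
// vets:
//
//   constexpr long double operator""_deg(long double d) {
//     return d * 3.1415926535897932L / 180;
//   }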
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
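// For illustration of the three kinds:
//
//   template<typename T, unsigned N> void f(T (&)[N]);
//   int arr[4];
//   f(arr);       // T is deduced (CTAK_Deduced); N is deduced from the
//                 // array bound (CTAK_DeducedFromArrayBound)
//   f<int>(arr);  // T is written in the code (CTAK_Specified)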
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
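///
/// For example, a parameter pack that appears in the type of a data
/// member without being expanded is diagnosed with
/// \c UPPC_DataMemberType:
///
/// \code
/// template<typename ...Ts>
/// struct X {
///   Ts member; // error: unexpanded parameter pack 'Ts'
/// };
/// \endcode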
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
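///
/// For example, the ellipsis in the parameter declaration below turns
/// the parameter type \c Ts into the pack expansion \c Ts...:
///
/// \code
/// template<typename ...Ts> void f(Ts ...args);
/// \endcode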
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
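///
/// For example (following C++11 [temp.variadic]), \c As and \c Bs below
/// occur in the same pack expansion and therefore must expand to the
/// same number of arguments:
///
/// \code
/// template<typename...> struct Tuple { };
/// template<typename T, typename U> struct Pair { };
/// template<typename ...As> struct zip {
///   template<typename ...Bs> struct with {
///     typedef Tuple<Pair<As, Bs>...> type;
///   };
/// };
/// typedef zip<short, int>::with<unsigned short, unsigned>::type T1; // OK
/// typedef zip<short>::with<unsigned short, unsigned>::type T2; // error
/// \endcode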
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
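///
/// For example, the call below fails with \c TDK_Inconsistent, because
/// \c T is deduced as \c int from the first argument but as \c double
/// from the second:
///
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.5); } // deduction fails
/// \endcode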
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
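///
/// For example, deduction succeeds (\c DAR_Succeeded) for:
///
/// \code
/// auto x = 42;  // deduced as 'int'
/// auto *p = &x; // deduced as 'int *'
/// \endcode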
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
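/// For example, the return type of \c f below is deduced as \c int
/// from its return statement (C++14 return type deduction):
///
/// \code
/// auto f() { return 42; }
/// \endcode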
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
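///
/// For example, each level of the recursive instantiation below pushes
/// another entry onto this stack; without the explicit specialization,
/// the depth limit would eventually be exceeded:
///
/// \code
/// template<unsigned N> struct Fact {
///   static const unsigned value = N * Fact<N - 1>::value;
/// };
/// template<> struct Fact<0> {
///   static const unsigned value = 1;
/// };
/// \endcode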
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
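///
/// A minimal usage sketch (hypothetical caller, where \c S is a Sema
/// instance):
///
/// \code
/// {
///   // Substitute the second element of each parameter pack.
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, 1);
///   // ... perform the substitution ...
/// } // the previous index is restored here
/// \endcode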
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
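///
/// A minimal usage sketch (hypothetical member-function caller):
///
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // too deep; an error has already been produced
/// // ... perform the instantiation; the entry is popped when Inst
/// // goes out of scope ...
/// \endcode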
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
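///
/// For example, the operand of \c sizeof is unevaluated:
///
/// \code
/// int f();
/// unsigned n = sizeof(f()); // f is named but never called
/// \endcode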
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
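///
/// A minimal usage sketch (hypothetical caller):
///
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform template argument deduction or substitution ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure; // errors were trapped, not emitted
/// \endcode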
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
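///
/// For example, the call below queues an implicit instantiation of
/// \c X<int>::f, which is performed at the end of the translation unit:
///
/// \code
/// template<typename T> struct X { void f() { } };
/// void g() { X<int>().f(); }
/// \endcode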
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
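///
/// For example:
///
/// \code
/// [super init];      // ObjCSuperMessage
/// [receiver count];  // ObjCInstanceMessage
/// [SomeClass alloc]; // ObjCClassMessage
/// \endcode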
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
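/// Illustrative only (assumed operand values; the identifier and alignment
/// operands are optional): typical spellings for each kind above.
/// \code
///   #pragma pack(4)              // PPK_Default with n == 4
///   #pragma pack(show)           // PPK_Show (MSVC)
///   #pragma pack(push, label, 8) // PPK_Push
///   #pragma pack(pop, label)     // PPK_Pop
/// \endcode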
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed '\#pragma GCC visibility ...'.
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Checks if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
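/// A minimal sketch (placeholder expression 'x', not from this header) of
/// the source forms each kind corresponds to:
/// \code
///   double d = x;         // CCK_ImplicitConversion
///   (int)x;               // CCK_CStyleCast
///   int(x);               // CCK_FunctionalCast
///   static_cast<int>(x);  // CCK_OtherCast (a named cast)
/// \endcode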
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is indicated by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
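/// Illustrative only (assumed placeholder declarations): the call forms the
/// kinds above classify when the callee is variadic.
/// \code
///   printf("%d", 1);      // VariadicFunction
///   myBlock(1, 2);        // VariadicBlock: 'myBlock' is a variadic block
///   [logger log:fmt, 1];  // VariadicMethod (Objective-C)
/// \endcode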
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
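/// A hedged sketch (placeholder declarations, assuming <string>): passing a
/// class type with a non-trivial copy constructor through '...' is the
/// canonical undefined case.
/// \code
///   void f(const char *, ...);
///   std::string s;
///   f("%s", s);          // typically VAK_Undefined: non-trivial type
///   f("%s", s.c_str());  // VAK_Valid
/// \endcode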
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types which
/// point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
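/// A minimal sketch (assumed placeholder declarations) of assignments that
/// produce some of the kinds above:
/// \code
///   int *p; int i; void *v; int (*fp)(); unsigned *u;
///   i = p;   // PointerToInt
///   p = i;   // IntToPointer
///   v = fp;  // FunctionVoidPointer
///   u = p;   // IncompatiblePointerSign
/// \endcode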
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = nullptr) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
NonStandardCompositeType);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
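/// A hedged sketch (placeholder types; roughly how the kinds above apply to
/// direct reference binding):
/// \code
///   struct B {}; struct D : B {};
///   D d; const D cd{};
///   D &r1 = d;        // Ref_Compatible: cv1 == cv2
///   const D &r2 = d;  // Ref_Compatible_With_Added_Qualification
///   B &r3 = d;        // Ref_Compatible, with a derived-to-base adjustment
///   // D &r4 = cd;    // ill-formed: merely Ref_Related (cv1 < cv2)
/// \endcode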
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted condition expression; invalid if there were any errors.
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the C++ conversion to bool; the
/// result is invalid if the conversion failed.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. The result is invalid on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - Verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// The result is invalid on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
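/// Illustrative only (standard CUDA attribute spellings; the declarations
/// are placeholders): the attributes that typically select each target.
/// \code
///   __device__ void f();            // CFT_Device
///   __global__ void g();            // CFT_Global
///   void h();                       // CFT_Host (no target attribute)
///   __host__ __device__ void hd();  // CFT_HostDevice
/// \endcode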
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
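/// Illustrative only (placeholder declarations; the 'format' attribute is
/// the standard GCC/Clang spelling): how attribute archetypes map to the
/// kinds above.
/// \code
///   int my_printf(const char *fmt, ...)
///       __attribute__((format(printf, 1, 2)));  // FST_Printf
///   int my_scanf(const char *fmt, ...)
///       __attribute__((format(scanf, 1, 2)));   // FST_Scanf
/// \endcode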
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
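/// A hedged sketch (names are placeholders; the attribute spellings follow
/// the Clang attribute documentation) of declarations that feed these tags:
/// \code
///   static const int my_int_tag
///       __attribute__((type_tag_for_datatype(my_api, int))) = 1;
///   void my_send(void *buf, int tag)
///       __attribute__((pointer_with_type_tag(my_api, 1, 2)));
/// \endcode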
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
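// Worked example (illustrative): for a callee with 2 parameters,
// TooManyArguments(2, 3) is true, TooManyArguments(2, 2, true) is true
// (code completion right after a comma counts the in-progress argument as
// one extra), and TooManyArguments(2, 1, true) is false.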
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
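// Illustrative use of the RAII object above (a hedged sketch; the enumerator
// name follows this header's ExpressionEvaluationContext):
//   {
//     EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
//     // ... analyze subexpressions in an unevaluated context ...
//   } // PopExpressionEvaluationContext runs here, even on early exits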
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
declare_reduction_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in){{$}}
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: #pragma omp declare reduction (fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: struct SSS {
struct SSS {
int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in)
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
};
// CHECK: };
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: int main() {
int main() {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
}
return 0;
}
// CHECK: }
#pragma omp declare reduction(mymin:int \
: omp_out = omp_out > omp_in ? omp_in : omp_out) \
initializer(omp_priv = 2147483647)
int foo(int argc, char **argv) {
int x;
#pragma omp parallel for reduction(mymin : x)
for (int i = 0; i < 1000; i++)
;
return 0;
}
// CHECK: #pragma omp parallel for reduction(mymin: x)
#endif
|
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/functors/AddAssign.h>
#include <blaze/math/functors/Assign.h>
#include <blaze/math/functors/MultAssign.h>
#include <blaze/math/functors/SchurAssign.h>
#include <blaze/math/functors/SubAssign.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 // Storage order of the right-hand side dense matrix
, typename OP > // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
}
}
/*! \endcond */
//*************************************************************************************************
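// Worked example (illustrative, assuming createThreadMapping() yields a 2x2
// grid for 4 threads): for a 100x100 matrix with simdEnabled and
// SIMDSIZE == 4, equalShare1 = 100/2 = 50 and rest1 = 50 & 3 = 2, so
// rowsPerThread = 50 - 2 + 4 = 52 (padded up to a SIMD boundary); likewise
// colsPerThread = 52. Thread i = 3 then starts at (row, column) = (52, 52)
// and is clipped to an m x n = 48 x 48 submatrix, while any thread whose
// start position lies outside the matrix simply skips its iteration.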
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 // Storage order of the right-hand side sparse matrix
, typename OP > // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, Assign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
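// Illustrative call path (a sketch; user code never invokes smpAssign()
// directly, the expression templates dispatch to it): with shared-memory
// parallelization enabled in the Blaze configuration, an assignment such as
//
//    blaze::DynamicMatrix<double> A( 1000UL, 1000UL ), B( 1000UL, 1000UL );
//    A = B;  // resolves to smpAssign(), which opens a BLAZE_PARALLEL_SECTION
//            // and runs openmpAssign( ~A, ~B, Assign() ) on an OpenMP team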
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, AddAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SubAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
schurAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SchurAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
templatemath.h | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <system/dll.h>
#include <system/pointercast.h>
#include <math/platformmath.h>
#include <array/DataTypeUtils.h>
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.718281828459
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace sd {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template <typename T>
math_def inline bool nd4j_eq(T val1, T val2, double eps);
template<typename T, typename Z>
math_def inline Z nd4j_re(T val1, T val2);
template<typename T, typename Z>
math_def inline Z nd4j_rint(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_copysign(T val1, T val2);
template <typename T, typename Z>
math_def inline Z nd4j_softplus(T val);
template <typename T>
math_def inline T nd4j_rotl(T val, T shift);
template <typename T>
math_def inline T nd4j_rotr(T val, T shift);
//#ifndef __CUDACC__
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);
template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);
template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);
template<typename X, typename Z>
math_def inline Z nd4j_log(X val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);
template<typename T, typename Z>
math_def inline Z nd4j_round(T val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);
template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);
math_def inline int32_t floatToRawIntBits(float d) {
union {
float f;
int32_t i;
} tmp;
tmp.f = d;
return tmp.i;
}
math_def inline float intBitsToFloat(int32_t i) {
union {
float f;
int32_t i;
} tmp;
tmp.i = i;
return tmp.f;
}
math_def inline float mulsignf(float x, float y) {
return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & (1 << 31)));
}
math_def inline float copysignfk(float x, float y) {
return intBitsToFloat((floatToRawIntBits(x) & ~(1 << 31)) ^ (floatToRawIntBits(y) & (1 << 31)));
}
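// Worked example (illustrative): copysignfk(3.0f, -0.0f) == -3.0f. The mask
// (1 << 31) isolates the IEEE-754 sign bit, so x's own sign bit is cleared
// first and y's sign bit is XOR'd in; only the sign transfers, never the
// magnitude. mulsignf(x, y) differs in that it flips x's sign rather than
// replacing it.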
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
if (val >= (T) 0.f)
return val;
return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
if (val < (T) 0.0f)
return alpha * val;
else
return val;
}
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
if (val >= static_cast<T>(0.0f))
return static_cast<Z>(1.0f);
return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
//return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
return val / ((T) 1.0f + sd::math::nd4j_abs<T>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);
template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);
template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}
template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
return p_tan<Z>(static_cast<Z>(tval));
}
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
Z tanh = nd4j_tanh<T,Z>(val);
return (Z) 1.0f - tanh * tanh;
}
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
Z sigmoid = nd4j_sigmoid<T,Z>(val);
return sigmoid * ((Z) 1.0f - sigmoid);
}
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
T y = (T) 1.0f + nd4j_abs(val);
return (Z) 1.0f / (y * y);
}
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);
template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);
//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
*/
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
Z dot = (Z)0.0f;
for(int e = 0; e < length; e++) {
dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
}
return dot;
}
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);
template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
//Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}
template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);
template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
if (value < (float16) 0.f) {
return float16(__hneg(value.data));
} else
return value;
#else
return (float16) fabsf((float) value);
#endif
}
template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
return (bfloat16) fabsf((float) value);
}
template<>
math_def inline float nd4j_abs<float>(float value) {
return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
return llabs(value);
}
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
return value;
}
template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
return value;
}
template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
return value;
}
template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
return value;
}
template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
return value;
}
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
return value == bfloat16::nan(); //0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<float>(float value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
return false;
}
template<typename T>
math_def inline bool nd4j_isfin(T value) {
return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
return copysign(val1, val2);
}
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
if (val2 < 0) return -(nd4j_abs<int>(val1));
else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
else return nd4j_abs<Nd4jLong>(val1);
}
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
return (val1 || val2) ? true : false;
}
template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
return val1 > val2 ? val1 : val2;
}
template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
return (val1 && val2) ? true : false;
}
template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
return val1 < val2 ? val1 : val2;
}
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
if (sd::math::nd4j_isinf<T>(d1) && sd::math::nd4j_isinf<T>(d2)) {
if (d1 > 0 && d2 > 0)
return true;
else if (d1 < 0 && d2 < 0)
return true;
else
return false;
}
auto diff = static_cast<double>(sd::math::nd4j_abs<T>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return true;
// Knuth approach
// works well except in the range of very small numbers
if (diff <= sd::math::nd4j_max<double>(sd::math::nd4j_abs<double>(static_cast<double>(d1)), sd::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
return true;
return false;
}
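// Worked example (illustrative): with eps = 1e-5,
//   nd4j_eq<float>(1.0f, 1.0f + 5e-6f, 1e-5)   -> true  (absolute test)
//   nd4j_eq<float>(1e8f, 1e8f + 500.0f, 1e-5)  -> true  (Knuth test:
//       diff ~ 500 <= max(|d1|, |d2|) * eps ~ 1000)
//   nd4j_eq<float>(1e8f, 1e8f + 5000.0f, 1e-5) -> false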
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
return static_cast<Z>(p_ceil<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
return static_cast<Z>(p_round<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
return p_asin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
return p_atan<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atanh(X val) {
return p_atanh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
return p_cosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
return p_rint<X>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
return p_sinh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
return p_acos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
return p_acosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
return p_cos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
return p_exp<X>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
return static_cast<Z>(p_floor<X>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
return static_cast<Z>(p_log<X>(val));
}
/**
* This function is a special case - it must return a floating-point value, and the Y argument can optionally be floating point as well
* @tparam X
* @tparam Y
* @tparam Z
* @param val
* @param val2
* @return
*/
template <>
math_def inline float nd4j_pow(float val, float val2) {
return p_pow<float>(val, val2);
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
/**
* LogGamma(a) - floating-point extension of ln(n!): returns ln(Gamma(a)), where Gamma(n + 1) = n!
**/
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
// if (x <= X(0.0))
// {
// std::stringstream os;
// os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
// throw std::invalid_argument( os.str() );
// }
if (x < X(12.0)) {
return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
}
// Abramowitz and Stegun 6.1.41
// Asymptotic series should be good to at least 11 or 12 figures
// For error analysis, see Whittaker and Watson,
// A Course of Modern Analysis (1927), page 252
static const double c[8] = {
1.0/12.0,
-1.0/360.0,
1.0/1260.0,
-1.0/1680.0,
1.0/1188.0,
-691.0/360360.0,
1.0/156.0,
-3617.0/122400.0
};
double z = Z(1.0 / Z(x * x));
double sum = c[7];
for (int i = 6; i >= 0; i--) {
sum *= z;
sum += c[i];
}
double series = sum / Z(x);
static const double halfLogTwoPi = 0.91893853320467274178032973640562;
return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}
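// Spot checks (illustrative): nd4j_lgamma(5.0) takes the first branch and
// returns ln(Gamma(5)) = ln(24) ~ 3.1781, while nd4j_lgamma(100.0) uses the
// asymptotic series and returns ln(99!) ~ 359.1342.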
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
if (val1 == (T) 0.0f && val2 == (T) 0.0f)
return (T) 0.0f;
return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
return p_sin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
return p_sqrt<Z>(static_cast<Z>(val));
}
template <typename X>
math_def inline X neg_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (p - o)/ (p + o);
}
template <typename X>
math_def inline X pos_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(-2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (o - p) / (o + p);
}
math_def inline float neu_tanh(float val, float sign) {
float e(M_E);
float av = sign * val;
auto p = sd::math::nd4j_pow<float, float, float>(e, -av * 2.f);
return (1 - p) / (1 + p);
}
template <>
math_def inline float nd4j_tanh(float val) {
float sign = copysignfk(1.0f, val);
return sign * neu_tanh(val, sign);
}
template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
return val <= 0 ? neg_tanh(val) : pos_tanh(val);
}
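// Note on the float specialization above (illustrative): it evaluates tanh
// through exp(-2|x|), so the pow() argument is never positive and cannot
// overflow for large |x|; the sign is restored afterwards via copysignfk,
// e.g. nd4j_tanh<float, float>(-3.0f) ~ -0.99505.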
template <typename T>
math_def inline T nd4j_rotl(T val, T shift) {
return p_rotl<T>(val, shift);
}
template <typename T>
math_def inline T nd4j_rotr(T val, T shift) {
return p_rotr<T>(val, shift);
}
template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
return p_erf<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
return p_erfc<Z>(static_cast<Z>(val));
}
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
T temp = val1; val1=val2; val2=temp;
};
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
// nd4j_lgamma<X,Z>(a);
// return (Z)std::tgamma(a);
// Split the function domain into three intervals:
// (0, 0.001), [0.001, 12), and (12, infinity)
///////////////////////////////////////////////////////////////////////////
// First interval: (0, 0.001)
//
// For small a, 1/Gamma(a) has power series a + gamma a^2 - ...
// So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
// The relative error over this interval is less than 6e-7.
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant
if (a < X(0.001))
return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));
///////////////////////////////////////////////////////////////////////////
// Second interval: [0.001, 12)
if (a < X(12.0)) {
// The algorithm directly approximates gamma over (1,2) and uses
// reduction identities to reduce other arguments to this interval.
double y = (double)a;
int n = 0;
bool argWasLessThanOne = y < 1.0;
// Add or subtract integers as necessary to bring y into (1,2)
// Will correct for this below
if (argWasLessThanOne) {
y += 1.0;
}
else {
n = static_cast<int>(floor(y)) - 1; // will use n later
y -= n;
}
// numerator coefficients for approximation over the interval (1,2)
static const double p[] = {
-1.71618513886549492533811E+0,
2.47656508055759199108314E+1,
-3.79804256470945635097577E+2,
6.29331155312818442661052E+2,
8.66966202790413211295064E+2,
-3.14512729688483675254357E+4,
-3.61444134186911729807069E+4,
6.64561438202405440627855E+4
};
// denominator coefficients for approximation over the interval (1,2)
static const double q[] = {
-3.08402300119738975254353E+1,
3.15350626979604161529144E+2,
-1.01515636749021914166146E+3,
-3.10777167157231109440444E+3,
2.25381184209801510330112E+4,
4.75584627752788110767815E+3,
-1.34659959864969306392456E+5,
-1.15132259675553483497211E+5
};
double num = 0.0;
double den = 1.0;
double z = y - 1;
for (auto i = 0; i < 8; i++) {
num = (num + p[i]) * z;
den = den * z + q[i];
}
double result = num / den + 1.0;
// Apply correction if argument was not initially in (1,2)
if (argWasLessThanOne) {
// Use identity gamma(z) = gamma(z+1)/z
// The variable "result" now holds gamma of the original y + 1
// Thus we use y-1 to get back the original y.
result /= (y - 1.0);
}
else {
// Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
for (auto i = 0; i < n; i++)
result *= y++;
}
return Z(result);
}
///////////////////////////////////////////////////////////////////////////
// Third interval: [12, infinity)
if (a > 171.624) {
// Correct answer too large to display. Force +infinity.
return Z(DOUBLE_MAX_VALUE);
// return DataTypeUtils::infOrMax<Z>();
}
return sd::math::nd4j_exp<Z,Z>(sd::math::nd4j_lgamma<X,Z>(a));
}
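// Spot checks (illustrative): nd4j_gamma(0.5) ~ sqrt(pi) ~ 1.77245 and
// nd4j_gamma(5.0) = 4! = 24, both via the second interval and its reduction
// identities, while arguments above 171.624 overflow double and are clamped
// to DOUBLE_MAX_VALUE by the third branch.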
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
// guard first: the series below multiplies by 1/gamma(a), which blows up
// as a -> 0 (igamma is not computed for a ~ zero)
if (a <= X(0.000001))
return Z(0);
Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a));
auto sum = Z(0.);
auto denom = Z(1.);
for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
denom *= (a + i);
sum += nd4j_pow<X, int, Z>(x, i) / denom;
}
return aim * sum;
}
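// Editorial note (added): the loop above implements the standard power
// series for the regularized lower incomplete gamma function,
//   P(a, x) = x^a e^{-x} / Gamma(a) * sum_{i >= 0} x^i / (a (a+1) ... (a+i)),
// where `aim` holds the prefactor x^a / (e^x Gamma(a)) and `denom`
// accumulates the rising product a (a+1) ... (a+i); iteration stops once
// 1/denom falls to 1e-12 or below.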
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}
#ifdef __CUDACC__
namespace atomics {
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);
template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
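// Editorial sketch (added, hedged): the float/double specializations above
// follow the standard CUDA pattern for atomics the hardware lacks --
// reinterpret the value's bits as an integer word and retry atomicCAS until
// no other thread has intervened. Generic recipe, with `combine` standing
// in for min/max/add:
//
//   __device__ double atomic_combine(double* address, double val) {
//       unsigned long long* p = (unsigned long long*) address;
//       unsigned long long old = *p, assumed;
//       do {
//           assumed = old;
//           double updated = combine(__longlong_as_double(assumed), val);
//           old = atomicCAS(p, assumed, __double_as_longlong(updated));
//       } while (assumed != old);  // another thread won the race; retry
//       return __longlong_as_double(old);
//   }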
template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed; // val is an integer here, not double bits
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
return (Nd4jLong) atomicMin((long long*)address, (long long)val); // signed compare; the unsigned cast mis-ordered negative values
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
#endif
}
// todo: the sub-word (8- and 16-bit) min/max specializations here and below
// are not truly atomic: they read-modify-write through a local copy. A
// correct version must CAS on the containing 32-bit word, as
// internal_16bit_atomicAdd does later in this file. The original bodies
// also stored the return value of atomicMin/atomicMax (the *old* value),
// so the min was never written back; that much is fixed here.
// (Note: comparing float16/bfloat16 by their raw int16 bit patterns also
// misorders negative values; kept as-is.)
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
*address = (int16_t) math::nd4j_min((int32_t) *address, (int32_t) val);
return *address;
}
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}
template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}
template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
// todo: as noted at nd4j_atomicMin<int16_t> above, these sub-word
// specializations are not atomic; the lost-update bug (storing the old
// value returned by atomicMin/atomicMax instead of the computed result)
// is fixed here.
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
*address = (uint8_t) math::nd4j_min((uint32_t) *address, (uint32_t) val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
*address = (int8_t) math::nd4j_min((int32_t) *address, (int32_t) val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
*address = (uint16_t) math::nd4j_min((uint32_t) *address, (uint32_t) val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
*address = (uint8_t) math::nd4j_max((uint32_t) *address, (uint32_t) val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
*address = (int8_t) math::nd4j_max((int32_t) *address, (int32_t) val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
*address = (uint16_t) math::nd4j_max((uint32_t) *address, (uint32_t) val);
return *address;
}
template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
*address = (int16_t) math::nd4j_max((int32_t) *address, (int32_t) val);
return *address;
}
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = nd4j_max((float16) old.B.H, val);
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = nd4j_max((float16) old.B.L, val);
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = nd4j_max(old.B.H, val);
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = nd4j_max(old.B.L, val);
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed; // val is an integer here, not double bits
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
}
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
unsigned long long* address_as_ull = (unsigned long long int *) address;
// return atomicAdd(address, val);
unsigned long long int old = *address_as_ull, assumed; // must match the 64-bit word being CASed
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
return atomicAdd(address, val);
}
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && CUDA_VERSION_MAJOR >=10
return float16(atomicAdd(reinterpret_cast<__half*>(address), val.data)); // atomicAdd on __half returns the old value; the original fell off the end of a non-void function here
#else
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
#endif
}
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
auto addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H + val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L + val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <typename T>
static inline __device__ T internal_16bit_atomicAdd(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
// seed the CAS loop from memory; the original seeded one half with `val`
// and read the other half uninitialized on the first iteration
pairOld.W = *base_address;
if (reinterpret_cast<int*>(address) == base_address) {
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
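// Editorial note (added): internal_16bit_atomicAdd emulates a 16-bit atomic
// by CAS-ing the aligned 32-bit word that contains the target half: the
// union overlays the two halves (B.H at the lower address, B.L at the
// upper) on a single int W, only the addressed half is recomputed, and the
// sibling half is carried through unchanged. Hedged kernel sketch (names
// are illustrative, not part of the library):
//
//   __global__ void bump(int16_t* counters, int n) {
//       int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i < n)
//           internal_16bit_atomicAdd<int16_t>(&counters[i % 7], (int16_t) 1);
//   }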
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicAdd<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicAdd<uint16_t>(address, val);
}
// todo: not atomic -- plain read-modify-write; a correct version needs a
// __byte_perm-based CAS on the containing word (cf. nd4j_atomicMul<int8_t>
// below). The original detour through a local plus a device atomicAdd on
// that local provided no atomicity either, so it has been simplified away.
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
*address = (int8_t)(*address + val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
*address = (uint8_t)(*address + val);
return *address;
}
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
// bool "addition" saturates: true once either operand is true
*address = *address || val;
return *address;
}
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
return nd4j_atomicAdd<double>(address, -val);
}
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
return nd4j_atomicMul<double>(address, 1./val);
}
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
return (int32_t)atomicAdd((int*)address, (int)val);
}
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
return nd4j_atomicAdd<float>(address, -val);
}
template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
return nd4j_atomicAdd<float16>(address, -val);
}
template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
return nd4j_atomicAdd<bfloat16>(address, -val);
}
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
int* address_as_ull =
( int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val *
__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (int8_t)old;
}
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (uint8_t)old;
}
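// Editorial note (added): the two byte-wide specializations above splice a
// single byte back into its containing 32-bit word with __byte_perm. The
// extract selector (((size_t)address & 3) | 0x4440) pulls out the addressed
// byte, and the table {0x3214, 0x3240, 0x3410, 0x4210} supplies the
// selector that re-inserts the product at byte lane 0, 1, 2, or 3 while
// preserving the other three bytes; the early break skips the CAS when the
// spliced word equals the word just read (the multiply is then a no-op).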
template <typename T>
static inline __device__ T internal_16bit_atomicMul(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
pairOld.W = *base_address; // seed from memory, as in internal_16bit_atomicAdd
if (reinterpret_cast<int*>(address) == base_address) {
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicMul<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicMul<uint16_t>(address, val);
}
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
int* res_address = address;
int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
unsigned int* res_address = address;
unsigned int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (int64_t)old;
}
template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (uint64_t)old;
}
#if !defined(_WIN32) && !defined(_WIN64)
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* res_address = (unsigned long long*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (Nd4jLong)old;
}
#endif
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, val);
}
template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, val);
}
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
return nd4j_atomicMul<float>(address, 1.f / val);
}
template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val);
}
template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val);
}
}
#endif
}
}
#ifdef _OPENMP
#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_abs(omp_in) + sd::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
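// Editorial usage sketch (added, hedged): with the declarations above in
// scope, OpenMP can reduce over these types directly; `data` and `n` are
// illustrative.
//
//   float acc = 0.f;
//   #pragma omp parallel for reduction(sumT : acc)
//   for (Nd4jLong i = 0; i < n; i++)
//       acc += data[i];
//
// Caveat: minT and maxT seed omp_priv with 0, so a minT result can never
// exceed 0 and a maxT result can never fall below 0; minTF/maxTF seed with
// +/-MAX_FLOAT instead.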
#endif
#endif /* TEMPLATEMATH_H_ */
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
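/*
  Usage sketch (editorial addition, hedged): `image' and `exception' are
  assumed to come from the caller; GaussianNoise is one of the NoiseType
  values enumerated above.

    Image
      *noisy;

    noisy=AddNoiseImage(image,GaussianNoise,1.0,exception);
    if (noisy != (Image *) NULL)
      noisy=DestroyImage(noisy);
*/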
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
if ((traits == UndefinedPixelTrait) ||
(noise_traits == UndefinedPixelTrait))
continue;
if ((noise_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(noise_image,channel,p[i],q);
continue;
}
SetPixelChannel(noise_image,channel,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
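/*
  Usage sketch (editorial addition, hedged): a factor of 1.5 matches the
  conventional default of the -blue-shift option; `image' and `exception'
  come from the caller.

    Image
      *shifted;

    shifted=BlueShiftImage(image,1.5,exception);
    if (shifted != (Image *) NULL)
      shifted=DestroyImage(shifted);
*/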
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
CacheView
*image_view,
*shift_view;
Image
*shift_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate blue shift image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
shift_image=CloneImage(image,0,0,MagickTrue,exception);
if (shift_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
{
shift_image=DestroyImage(shift_image);
return((Image *) NULL);
}
/*
Blue-shift DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,shift_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
quantum;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) < quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) < quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) > quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) > quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(pixel.red+factor*quantum);
pixel.green=0.5*(pixel.green+factor*quantum);
pixel.blue=0.5*(pixel.blue+factor*quantum);
SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(shift_image);
}
sync=SyncCacheViewAuthenticPixels(shift_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shift_view=DestroyCacheView(shift_view);
if (status == MagickFalse)
shift_image=DestroyImage(shift_image);
return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
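/*
  Usage sketch (editorial addition, hedged): a radius of 0 asks the Gaussian
  operators to choose a suitable support for the given sigma.

    Image
      *sketch;

    sketch=CharcoalImage(image,0.0,1.0,exception);
    if (sketch != (Image *) NULL)
      sketch=DestroyImage(sketch);
*/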
MagickExport Image *CharcoalImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*charcoal_image,
*edge_image;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
edge_image->alpha_trait=UndefinedPixelTrait;
charcoal_image=(Image *) NULL;
status=ClampImage(edge_image,exception);
if (status != MagickFalse)
charcoal_image=BlurImage(edge_image,radius,sigma,exception);
edge_image=DestroyImage(edge_image);
if (charcoal_image == (Image *) NULL)
return((Image *) NULL);
status=NormalizeImage(charcoal_image,exception);
if (status != MagickFalse)
status=NegateImage(charcoal_image,MagickFalse,exception);
if (status != MagickFalse)
status=GrayscaleImage(charcoal_image,image->intensity,exception);
if (status == MagickFalse)
charcoal_image=DestroyImage(charcoal_image);
return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
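/*
  Usage sketch (editorial addition, hedged): blend every pixel 50% toward
  red; QueryColorCompliance() resolves the color name into a PixelInfo.

    Image
      *tinted;

    PixelInfo
      colorize;

    (void) QueryColorCompliance("red",AllCompliance,&colorize,exception);
    tinted=ColorizeImage(image,"50",&colorize,exception);
    if (tinted != (Image *) NULL)
      tinted=DestroyImage(tinted);
*/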
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
#define Colorize(pixel,blend_percentage,colorize) \
(((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)
CacheView
*image_view;
GeometryInfo
geometry_info;
Image
*colorize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
PixelInfo
blend_percentage;
ssize_t
y;
/*
Allocate colorized image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
colorize_image=CloneImage(image,0,0,MagickTrue,exception);
if (colorize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
{
colorize_image=DestroyImage(colorize_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
(IsPixelInfoGray(colorize) != MagickFalse))
(void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
(colorize->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
if (blend == (const char *) NULL)
return(colorize_image);
GetPixelInfo(colorize_image,&blend_percentage);
flags=ParseGeometry(blend,&geometry_info);
blend_percentage.red=geometry_info.rho;
blend_percentage.green=geometry_info.rho;
blend_percentage.blue=geometry_info.rho;
blend_percentage.black=geometry_info.rho;
blend_percentage.alpha=(MagickRealType) TransparentAlpha;
if ((flags & SigmaValue) != 0)
blend_percentage.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
blend_percentage.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
blend_percentage.alpha=geometry_info.psi;
if (blend_percentage.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
blend_percentage.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
blend_percentage.alpha=geometry_info.chi;
}
/*
Colorize DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
for (y=0; y < (ssize_t) colorize_image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) colorize_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
{
PixelTrait traits = GetPixelChannelTraits(colorize_image,
(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
}
q+=GetPixelChannels(colorize_image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorizeImageTag,progress,
colorize_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
colorize_image=DestroyImage(colorize_image);
return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
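/*
  Usage sketch (editorial addition, hedged): AcquireKernelInfo() parses a
  row-major "WxH:" matrix string in the user-defined kernel syntax; shown is
  a 5x5 identity with a mild red gain.

    Image
      *recolored;

    KernelInfo
      *kernel;

    kernel=AcquireKernelInfo("5x5: 1.2 0 0 0 0  0 1 0 0 0  0 0 1 0 0 "
      "0 0 0 1 0  0 0 0 0 1",exception);
    if (kernel != (KernelInfo *) NULL)
      {
        recolored=ColorMatrixImage(image,kernel,exception);
        kernel=DestroyKernelInfo(kernel);
        if (recolored != (Image *) NULL)
          recolored=DestroyImage(recolored);
      }
*/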
MagickExport Image *ColorMatrixImage(const Image *image,
const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
CacheView
*color_view,
*image_view;
double
ColorMatrix[6][6] =
{
{ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
};
Image
*color_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
u,
v,
y;
/*
Map given color_matrix, into a 6x6 matrix RGBKA and a constant
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
i=0;
for (v=0; v < (ssize_t) color_matrix->height; v++)
for (u=0; u < (ssize_t) color_matrix->width; u++)
{
if ((v < 6) && (u < 6))
ColorMatrix[v][u]=color_matrix->values[i];
i++;
}
/*
Initialize color image.
*/
color_image=CloneImage(image,0,0,MagickTrue,exception);
if (color_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
{
color_image=DestroyImage(color_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
*message;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" ColorMatrix image with color matrix:");
message=AcquireString("");
for (v=0; v < 6; v++)
{
*message='\0';
(void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < 6; u++)
{
(void) FormatLocaleString(format,MagickPathExtent,"%+f ",
ColorMatrix[v][u]);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
Apply the ColorMatrix to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,color_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
size_t
height;
GetPixelInfoPixel(image,p,&pixel);
height=color_matrix->height > 6 ? 6UL : color_matrix->height;
for (v=0; v < (ssize_t) height; v++)
{
double
sum;
sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
if (image->colorspace == CMYKColorspace)
sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
sum+=QuantumRange*ColorMatrix[v][5];
switch (v)
{
case 0: pixel.red=sum; break;
case 1: pixel.green=sum; break;
case 2: pixel.blue=sum; break;
case 3: pixel.black=sum; break;
case 4: pixel.alpha=sum; break;
default: break;
}
}
SetPixelViaPixelInfo(color_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(color_image);
}
if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
color_view=DestroyCacheView(color_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
color_image=DestroyImage(color_image);
return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
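/*
  Usage sketch (editorial addition, hedged): positive amounts implode toward
  the center, negative amounts explode outward.

    Image
      *imploded;

    imploded=ImplodeImage(image,0.5,BilinearInterpolatePixel,exception);
    if (imploded != (Image *) NULL)
      imploded=DestroyImage(imploded);
*/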
MagickExport Image *ImplodeImage(const Image *image,const double amount,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"
CacheView
*canvas_view,
*implode_view,
*interpolate_view;
double
radius;
Image
*canvas_image,
*implode_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize implode image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (implode_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
implode_image=DestroyImage(implode_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
scale.x=1.0;
scale.y=1.0;
center.x=0.5*canvas_image->columns;
center.y=0.5*canvas_image->rows;
radius=center.x;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
{
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
radius=center.y;
}
/*
Implode image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
ssize_t
i;
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(implode_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(implode_image,channel,p[i],q);
}
else
{
double
factor;
/*
Implode the pixel.
*/
factor=1.0;
if (distance > 0.0)
factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
status=InterpolatePixelChannels(canvas_image,interpolate_view,
implode_image,method,(double) (factor*delta.x/scale.x+center.x),
(double) (factor*delta.y/scale.y+center.y),q,exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(implode_image);
}
if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
implode_view=DestroyCacheView(implode_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
implode_image=DestroyImage(implode_image);
return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method morphs a sequence of images: each image in the
% list is transformed into the next by the number of intervening images
% specified by frames. A single-image list is simply replicated.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o number_frames: Define the number of in-between images to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
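/*
  Usage sketch (editorial addition, hedged): `sequence' is an image list
  from the caller; three in-between frames are generated for each adjacent
  pair, and the result is a new list.

    Image
      *morphed;

    morphed=MorphImages(sequence,3,exception);
    if (morphed != (Image *) NULL)
      morphed=DestroyImageList(morphed);
*/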
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
const Image
*next;
ssize_t
n;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
for (n=1; n < (ssize_t) number_frames; n++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (n=0; n < (ssize_t) number_frames; n++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (n+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
GetNextImageInList(next)->rows+0.5),next->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
status=SetImageStorageClass(morph_image,DirectClass,exception);
if (status == MagickFalse)
{
morph_image=DestroyImage(morph_image);
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(morph_image,i);
PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
if ((traits == UndefinedPixelTrait) ||
(morph_traits == UndefinedPixelTrait))
continue;
if ((morph_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morph_image,channel,p[i],q);
continue;
}
SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
}
p+=GetPixelChannels(morph_image);
q+=GetPixelChannels(morph_images);
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (n < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
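/*
  Usage sketch (editorial addition, hedged): seed the random generator, then
  cover the whole canvas; attenuate=1 and depth=16 are illustrative values.

    SegmentInfo
      segment;

    SetRandomSecretKey(42UL);
    segment.x1=0.0;
    segment.y1=0.0;
    segment.x2=(double) image->columns-1.0;
    segment.y2=(double) image->rows-1.0;
    (void) PlasmaImage(image,&segment,1,16,exception);
*/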
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
const double pixel,const double noise)
{
MagickRealType
plasma;
plasma=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
return(ClampToQuantum(plasma));
}
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
double
plasma;
MagickStatusType
status;
const Quantum
*magick_restrict u,
*magick_restrict v;
Quantum
*magick_restrict q;
ssize_t
i;
ssize_t
x,
x_mid,
y,
y_mid;
if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
(fabs(segment->y2-segment->y1) < MagickEpsilon))
return(MagickTrue);
if (depth != 0)
{
SegmentInfo
local_info;
/*
Divide the area into quadrants and recurse.
*/
depth--;
attenuate++;
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
local_info=(*segment);
local_info.x2=(double) x_mid;
local_info.y2=(double) y_mid;
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.y1=(double) y_mid;
local_info.x2=(double) x_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y2=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y1=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
(fabs(segment->x2-x_mid) < MagickEpsilon) &&
(fabs(segment->y1-y_mid) < MagickEpsilon) &&
(fabs(segment->y2-y_mid) < MagickEpsilon))
return(MagickFalse);
/*
Average pixels and apply plasma.
*/
status=MagickTrue;
plasma=(double) QuantumRange/(2.0*attenuate);
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->x2-x_mid) >= MagickEpsilon))
{
/*
Left pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),1,1,exception);
v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
{
/*
Right pixel.
*/
x=CastDoubleToLong(ceil(segment->x2-0.5));
u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),1,1,exception);
v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickFalse);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
/*
Bottom pixel.
*/
y=CastDoubleToLong(ceil(segment->y2-0.5));
u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,1,1,exception);
v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
{
/*
Top pixel.
*/
y=CastDoubleToLong(ceil(segment->y1-0.5));
u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,1,1,exception);
v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
(fabs(segment->y1-segment->y2) >= MagickEpsilon))
{
/*
Middle pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
y=CastDoubleToLong(ceil(segment->y1-0.5));
u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
x=CastDoubleToLong(ceil(segment->x2-0.5));
y=CastDoubleToLong(ceil(segment->y2-0.5));
v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if ((fabs(segment->x2-segment->x1) < 3.0) &&
(fabs(segment->y2-segment->y1) < 3.0))
return(status == 0 ? MagickFalse : MagickTrue);
return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
const SegmentInfo *segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
CacheView
*image_view,
*u_view,
*v_view;
MagickBooleanType
status;
RandomInfo
*random_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
u_view=AcquireVirtualCacheView(image,exception);
v_view=AcquireVirtualCacheView(image,exception);
random_info=AcquireRandomInfo();
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
attenuate,depth,exception);
random_info=DestroyRandomInfo(random_info);
v_view=DestroyCacheView(v_view);
u_view=DestroyCacheView(u_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
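%  A usage sketch (illustrative; the caption text, angle, and interpolation
%  method are arbitrary choices, not API requirements):
%
%      DrawInfo *draw_info=CloneDrawInfo((const ImageInfo *) NULL,
%        (const DrawInfo *) NULL);
%      Image *polaroid_image=PolaroidImage(image,draw_info,"Vacation 2021",
%        -10.0,BilinearInterpolatePixel,exception);
%      draw_info=DestroyDrawInfo(draw_info);
%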
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
const char *caption,const double angle,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
Image
*bend_image,
*caption_image,
*flop_image,
*picture_image,
*polaroid_image,
*rotate_image,
*trim_image;
size_t
height;
ssize_t
quantum;
/*
Simulate a Polaroid picture.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
image->rows)/25.0,10.0);
height=image->rows+2*quantum;
caption_image=(Image *) NULL;
if (caption != (const char *) NULL)
{
char
*text;
/*
Generate caption image.
*/
caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
if (caption_image == (Image *) NULL)
return((Image *) NULL);
text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
exception);
if (text != (char *) NULL)
{
char
geometry[MagickPathExtent];
DrawInfo
*annotate_info;
MagickBooleanType
status;
ssize_t
count;
TypeMetric
metrics;
annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
(void) CloneString(&annotate_info->text,text);
count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
&metrics,&text,exception);
status=SetImageExtent(caption_image,image->columns,(size_t)
((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
if (status == MagickFalse)
caption_image=DestroyImage(caption_image);
else
{
caption_image->background_color=image->border_color;
(void) SetImageBackgroundColor(caption_image,exception);
(void) CloneString(&annotate_info->text,text);
(void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
metrics.ascent);
if (annotate_info->gravity == UndefinedGravity)
(void) CloneString(&annotate_info->geometry,AcquireString(
geometry));
(void) AnnotateImage(caption_image,annotate_info,exception);
height+=caption_image->rows;
}
annotate_info=DestroyDrawInfo(annotate_info);
text=DestroyString(text);
}
}
picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
exception);
if (picture_image == (Image *) NULL)
{
if (caption_image != (Image *) NULL)
caption_image=DestroyImage(caption_image);
return((Image *) NULL);
}
picture_image->background_color=image->border_color;
(void) SetImageBackgroundColor(picture_image,exception);
(void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
quantum,exception);
if (caption_image != (Image *) NULL)
{
(void) CompositeImage(picture_image,caption_image,OverCompositeOp,
MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
caption_image=DestroyImage(caption_image);
}
(void) QueryColorCompliance("none",AllCompliance,
&picture_image->background_color,exception);
(void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
rotate_image=RotateImage(picture_image,90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
picture_image->columns,method,exception);
picture_image=DestroyImage(picture_image);
if (bend_image == (Image *) NULL)
return((Image *) NULL);
picture_image=bend_image;
rotate_image=RotateImage(picture_image,-90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
picture_image->background_color=image->background_color;
polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
exception);
if (polaroid_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
flop_image=FlopImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (flop_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
polaroid_image=flop_image;
(void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
picture_image=DestroyImage(picture_image);
(void) QueryColorCompliance("none",AllCompliance,
&polaroid_image->background_color,exception);
rotate_image=RotateImage(polaroid_image,angle,exception);
polaroid_image=DestroyImage(polaroid_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=rotate_image;
trim_image=TrimImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (trim_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=trim_image;
return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
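%  A usage sketch following the 80% starting point suggested above (scaling
%  the percentage by QuantumRange is the caller's responsibility):
%
%      Image *sepia_image=SepiaToneImage(image,0.80*(double) QuantumRange,
%        exception);
%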
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
{
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(sepia_image,ClampToQuantum(tone),q);
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
tone=threshold/7.0;
if ((double) GetPixelGreen(image,q) < tone)
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
if ((double) GetPixelBlue(image,q) < tone)
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(sepia_image);
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
(void) NormalizeImage(sepia_image,exception);
(void) ContrastImage(sepia_image,MagickTrue,exception);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
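%  A usage sketch (the 80% opacity, sigma of 2, and 5-pixel offsets are
%  arbitrary illustrative values):
%
%      Image *shadow_image=ShadowImage(image,80.0,2.0,5,5,exception);
%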
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
ChannelType
channel_mask;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
PixelInfo
background_color;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace,exception);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
exception);
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
exception);
clone_image->alpha_trait=BlendPixelTrait;
border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
/*
Shadow image.
*/
status=MagickTrue;
background_color=border_image->background_color;
background_color.alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(border_image,exception);
for (y=0; y < (ssize_t) border_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
if (border_image->alpha_trait != UndefinedPixelTrait)
background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
SetPixelViaPixelInfo(border_image,&background_color,q);
q+=GetPixelChannels(border_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
border_image=DestroyImage(border_image);
return((Image *) NULL);
}
channel_mask=SetImageChannelMask(border_image,AlphaChannel);
shadow_image=BlurImage(border_image,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(shadow_image,channel_mask);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% to have SketchImage() select a suitable radius for you. Angle gives the
% angle of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
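%  A usage sketch (radius 0 lets the method choose a radius; the sigma and
%  angle shown are arbitrary illustrative values):
%
%      Image *sketch_image=SketchImage(image,0.0,1.0,45.0,exception);
%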
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image.
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
random_info=AcquireRandomInfoThreadSet();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) random_image->columns; x++)
{
double
value;
ssize_t
i;
value=GetPseudoRandomValue(random_info[id]);
for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=ClampToQuantum(QuantumRange*value);
}
q+=GetPixelChannels(random_image);
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_view=DestroyCacheView(random_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
{
random_image=DestroyImage(random_image);
return(random_image);
}
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
status=ClampImage(dodge_image,exception);
if (status != MagickFalse)
status=NormalizeImage(dodge_image,exception);
if (status != MagickFalse)
status=NegateImage(dodge_image,MagickFalse,exception);
if (status != MagickFalse)
status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
MagickTrue,0,0,exception);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
if (blend_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlpha(blend_image,TransparentAlpha,exception);
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
0,0,exception);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of
% photosensitive paper to light. Threshold ranges from 0 to QuantumRange and
% is a measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
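%  A usage sketch; SolarizeImage() modifies the image in place (the 50%
%  threshold is an arbitrary illustrative value):
%
%      (void) SolarizeImage(image,0.50*(double) QuantumRange,exception);
%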
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
/*
Solarize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
return(SyncImage(image,exception));
}
/*
Solarize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] > threshold)
q[i]=QuantumRange-q[i];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
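%  A usage sketch (assumes a watermark image is available; the start offset
%  is read from the input image, and setting it to 0 is an assumption):
%
%      image->offset=0;
%      Image *stegano_image=SteganoImage(image,watermark,exception);
%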
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
CacheView
*stegano_view,
*watermark_view;
Image
*stegano_image;
int
c;
MagickBooleanType
status;
PixelInfo
pixel;
Quantum
*q;
ssize_t
x;
size_t
depth,
one;
ssize_t
i,
j,
k,
y;
/*
Initialize steganographic image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(watermark != (const Image *) NULL);
assert(watermark->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1UL;
stegano_image=CloneImage(image,0,0,MagickTrue,exception);
if (stegano_image == (Image *) NULL)
return((Image *) NULL);
stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
{
stegano_image=DestroyImage(stegano_image);
return((Image *) NULL);
}
/*
Hide watermark in low-order bits of image.
*/
c=0;
i=0;
j=0;
depth=stegano_image->depth;
k=stegano_image->offset;
status=MagickTrue;
watermark_view=AcquireVirtualCacheView(watermark,exception);
stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
{
for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
{
for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
{
ssize_t
offset;
(void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
exception);
offset=k/(ssize_t) stegano_image->columns;
if (offset >= (ssize_t) stegano_image->rows)
break;
q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
exception);
if (q == (Quantum *) NULL)
break;
switch (c)
{
case 0:
{
SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 1:
{
SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 2:
{
SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
}
if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
break;
c++;
if (c == 3)
c=0;
k++;
if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
k=0;
if (k == stegano_image->offset)
j++;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
(depth-i),depth);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
stegano_view=DestroyCacheView(stegano_view);
watermark_view=DestroyCacheView(watermark_view);
if (status == MagickFalse)
stegano_image=DestroyImage(stegano_image);
return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
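%  A usage sketch; StereoImage() is the zero-offset convenience form, and
%  the 10-pixel offset below is an arbitrary illustrative value:
%
%      Image *stereo_image=StereoImage(left_image,right_image,exception);
%      Image *anaglyph_image=StereoAnaglyphImage(left_image,right_image,
%        10,0,exception);
%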
*/
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
const Image
*image;
Image
*stereo_image;
MagickBooleanType
status;
ssize_t
y;
assert(left_image != (const Image *) NULL);
assert(left_image->signature == MagickCoreSignature);
if (left_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
left_image->filename);
assert(right_image != (const Image *) NULL);
assert(right_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=left_image;
if ((left_image->columns != right_image->columns) ||
(left_image->rows != right_image->rows))
ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
/*
Initialize stereo image attributes.
*/
stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
MagickTrue,exception);
if (stereo_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
{
stereo_image=DestroyImage(stereo_image);
return((Image *) NULL);
}
(void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
/*
Copy left image to red channel and right image to blue channel.
*/
status=MagickTrue;
for (y=0; y < (ssize_t) stereo_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
Quantum
*magick_restrict r;
p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
exception);
q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
(r == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) stereo_image->columns; x++)
{
SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
GetPixelAlpha(right_image,q))/2,r);
p+=GetPixelChannels(left_image);
q+=GetPixelChannels(right_image);
r+=GetPixelChannels(stereo_image);
}
if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
break;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
stereo_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (status == MagickFalse)
stereo_image=DestroyImage(stereo_image);
return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
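%  A usage sketch (a 180-degree sweep and bilinear interpolation are
%  arbitrary illustrative choices):
%
%      Image *swirl_image=SwirlImage(image,180.0,BilinearInterpolatePixel,
%        exception);
%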
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
double
radius;
Image
*canvas_image,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
/*
Compute scaling factor.
*/
center.x=(double) canvas_image->columns/2.0;
center.y=(double) canvas_image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas_image,interpolate_view,
swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: the blend percentages, parsed as a geometry string; the first
% value sets the blend for all color channels, with optional subsequent
% values overriding the green, blue, and alpha blends.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
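%  A usage sketch (the tint color and the 40% blend string are arbitrary
%  illustrative values):
%
%      PixelInfo tint;
%      (void) QueryColorCompliance("red",AllCompliance,&tint,exception);
%      Image *tint_image=TintImage(image,"40",&tint,exception);
%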
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
double
intensity;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
color_vector;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
tint_image=CloneImage(image,0,0,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
{
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsPixelInfoGray(tint) == MagickFalse))
(void) SetImageColorspace(tint_image,sRGBColorspace,exception);
if (blend == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.
*/
GetPixelInfo(image,&color_vector);
flags=ParseGeometry(blend,&geometry_info);
color_vector.red=geometry_info.rho;
color_vector.green=geometry_info.rho;
color_vector.blue=geometry_info.rho;
color_vector.alpha=(MagickRealType) OpaqueAlpha;
if ((flags & SigmaValue) != 0)
color_vector.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
color_vector.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
color_vector.alpha=geometry_info.psi;
if (image->colorspace == CMYKColorspace)
{
color_vector.black=geometry_info.rho;
if ((flags & PsiValue) != 0)
color_vector.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
color_vector.alpha=geometry_info.chi;
}
intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,tint_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
double
weight;
GetPixelInfo(image,&pixel);
weight=QuantumScale*GetPixelRed(image,p)-0.5;
pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelGreen(image,p)-0.5;
pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlue(image,p)-0.5;
pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlack(image,p)-0.5;
pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
(1.0-(4.0*(weight*weight)));
pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
SetPixelViaPixelInfo(tint_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(tint_image);
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
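%  A usage sketch (radius 0 lets the blur choose a radius; the sigma and
%  10-pixel ellipse offsets are arbitrary illustrative values):
%
%      Image *vignette_image=VignetteImage(image,0.0,10.0,10,10,exception);
%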
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
char
ellipse[MagickPathExtent];
DrawInfo
*draw_info;
Image
*canvas,
*blur_image,
*oval_image,
*vignette_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas=CloneImage(image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
canvas->alpha_trait=BlendPixelTrait;
oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
exception);
if (oval_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
(void) QueryColorCompliance("#000000",AllCompliance,
&oval_image->background_color,exception);
(void) SetImageBackgroundColor(oval_image,exception);
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
exception);
(void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
"0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
image->rows/2.0-y);
draw_info->primitive=AcquireString(ellipse);
(void) DrawImage(oval_image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
blur_image=BlurImage(oval_image,radius,sigma,exception);
oval_image=DestroyImage(oval_image);
if (blur_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
blur_image->alpha_trait=UndefinedPixelTrait;
(void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
0,0,exception);
blur_image=DestroyImage(blur_image);
vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
canvas=DestroyImage(canvas);
if (vignette_image != (Image *) NULL)
(void) TransformImageColorspace(vignette_image,image->colorspace,exception);
return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength are specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
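%  A usage sketch (an amplitude of 25 pixels and a wavelength of 150 pixels
%  are arbitrary illustrative values):
%
%      Image *wave_image=WaveImage(image,25.0,150.0,BilinearInterpolatePixel,
%        exception);
%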
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
CacheView
*canvas_image_view,
*wave_view;
float
*sine_map;
Image
*canvas_image,
*wave_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
(canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
/*
Allocate sine map.
*/
sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (float *) NULL)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) wave_image->columns; i++)
sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
((2.0*MagickPI*i)/wave_length));
/*
Wave image.
*/
status=MagickTrue;
progress=0;
canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
wave_view=AcquireAuthenticCacheView(wave_image,exception);
(void) SetCacheViewVirtualPixelMethod(canvas_image_view,
BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
for (y=0; y < (ssize_t) wave_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) wave_image->columns; x++)
{
status=InterpolatePixelChannels(canvas_image,canvas_image_view,
wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
if (status == MagickFalse)
break;
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(wave_image);
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
canvas_image_view=DestroyCacheView(canvas_image_view);
canvas_image=DestroyImage(canvas_image);
sine_map=(float *) RelinquishMagickMemory(sine_map);
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass
% filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
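%  A usage sketch (a threshold of a few percent of QuantumRange and zero
%  softness are arbitrary starting points, not recommended defaults):
%
%      Image *noise_image=WaveletDenoiseImage(image,
%        0.05*(double) QuantumRange,0.0,exception);
%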
*/
static inline void HatTransform(const float *magick_restrict pixels,
const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
const float
*magick_restrict p,
*magick_restrict q,
*magick_restrict r;
ssize_t
i;
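/*
1-2-1 hat filter at the given scale: the middle loop reads in-bounds
neighbors directly, while the head and tail loops mirror the out-of-bounds
neighbor across the row ends via q and r.
*/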
p=pixels;
q=pixels+scale*stride;
r=pixels+scale*stride;
for (i=0; i < (ssize_t) scale; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q-=stride;
r+=stride;
}
for ( ; i < (ssize_t) (extent-scale); i++)
{
kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
p+=stride;
}
q=p-scale*stride;
r=pixels+stride*(extent-2);
for ( ; i < (ssize_t) extent; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q+=stride;
r-=stride;
}
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low-pass filter outputs are called the approximation kernel & high-pass
filter outputs are referred to as the detail kernel.  The detail kernel
has high values in the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x,
y;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
q+=low_pass;
for (x=0; x < (ssize_t) image->columns; x++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
y;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
for (y=0; y < (ssize_t) image->rows; y++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
GraftInteriorForestsSetTransferIterationWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_tree_grafter_graft_interior_forests_set_transfer_iteration_worklet_h
#define vtk_m_worklet_contourtree_distributed_tree_grafter_graft_interior_forests_set_transfer_iteration_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace tree_grafter
{
/// Now set the transfer iteration for all attachment points
/// If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
class GraftInteriorForestsSetTransferIterationWorklet : public vtkm::worklet::WorkletMapField
{
public:
// NOTE: supernodeType is sized to ContourTree.Supernodes.GetNumberOfValues() so we can use it for our iteration
// NOTE: whenTransferred needs the FieldInOut type to avoid overwriting existing values, as not all values will be updated
using ControlSignature = void(FieldIn supernodeType, // input
FieldIn hierarchicalSuperId, // input
FieldInOut whenTransferred // input/output
);
using ExecutionSignature = void(_1, _2, _3);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
GraftInteriorForestsSetTransferIterationWorklet(const vtkm::Id& numTransferIterations)
: NumTransferIterations(numTransferIterations)
{
}
VTKM_EXEC void operator()(const vtkm::Id& supernodeType,
const vtkm::Id& hierarchicalSuperId,
vtkm::Id& whenTransferred) const
{ // operator ()
if ((supernodeType == vtkm::worklet::contourtree_augmented::IS_ATTACHMENT) &&
vtkm::worklet::contourtree_augmented::NoSuchElement(hierarchicalSuperId))
{ // not a supernode in the hierarchical tree yet
whenTransferred =
(this->NumTransferIterations | vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
} // not a supernode in the hierarchical tree yet
// In serial this worklet implements the following operation
/*
// Now set the transfer iteration for all attachment points
// If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
#pragma omp parallel for
for (indexType supernode = 0; supernode < contourTree->supernodes.size(); supernode++)
{ // per supernode
// std::cout << "Supernode " << supernode << std::endl;
if ((supernodeType[supernode] == IS_ATTACHMENT) && noSuchElement(hierarchicalSuperID[supernode]))
{ // not a supernode in the hierarchical tree yet
whenTransferred[supernode] = nTransferIterations | IS_SUPERNODE;
} // not a supernode in the hierarchical tree yet
} // per supernode
*/
} // operator ()
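// A hedged usage sketch (the array-handle names are hypothetical): the
// worklet would typically be dispatched over the supernode arrays with a
// vtkm::cont::Invoker, e.g.
//
//   vtkm::cont::Invoker invoke;
//   invoke(GraftInteriorForestsSetTransferIterationWorklet(nIterations),
//          supernodeType, hierarchicalSuperId, whenTransferred);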
private:
vtkm::Id NumTransferIterations;
}; // GraftInteriorForestsSetTransferIterationWorklet
} // namespace tree_grafter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
|
GB_unaryop__lnot_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_int64
// op(A') function: GB_tran__lnot_uint64_int64
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
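// For reference, GB_CAST_OP (p, p) for this operator expands (hand-expanded
// here for illustration, not generated) to:
//
//      {
//          int64_t aij = Ax [p] ;
//          uint64_t x = (uint64_t) aij ;
//          Cx [p] = !(x != 0) ;
//      }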
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_int64
(
uint64_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
FeldmanDkg.h | #pragma once
#include <memory>
#include <polycrypto/PolyCrypto.h>
#include <polycrypto/DkgCommon.h>
#include <polycrypto/AbstractPlayer.h>
#include <xutils/Log.h>
#include <xutils/Timer.h>
namespace Dkg {
using libpolycrypto::Fr;
using libpolycrypto::G1;
using libpolycrypto::multiExp;
class FeldmanPublicParameters {
public:
const DkgParams& params;
public:
FeldmanPublicParameters(const DkgParams& params)
: params(params)
{
}
public:
void getPlayerExps(size_t id, std::vector<Fr>& exps) const {
assertStrictlyLessThan(id, params.n);
exps.clear();
exps.push_back(Fr::one());
size_t k = id;
const auto& omegas = params.omegas;
// the degree of the polynomial is t-1
for(size_t j = 1; j < params.t; j++) {
exps.push_back(omegas[k]);
k = (k + id) % params.N;
}
assertEqual(exps.size(), params.t);
//logdbg << "w_n^i's for player " << id << ": ";
//for(auto w : exps) {
// // identify if the exp is a root of unity
// auto it = std::find(omegas.begin(), omegas.end(), w);
// if(it == omegas.end()) {
// throw std::runtime_error("This is not what I expected");
// }
// size_t pos = static_cast<size_t>(it - omegas.begin());
// std::cout << pos << ", ";
//}
//std::cout << endl;
}
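// Worked example (illustrative): with N = 8, id = 3 and t = 4, the loop
// visits k = 3, 6, 1, so exps = { 1, w_8^3, w_8^6, w_8^1 }, i.e. the
// powers (w_N^id)^j = w_N^{(id*j) mod N} for j = 0 .. t-1.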
};
class FeldmanPlayer : public AbstractPlayer {
public:
const FeldmanPublicParameters& fpp;
std::vector<G1> comm; // commitment to f_id(.), which has degree t-1 and thus t coefficients
std::vector<G1> commFinal; // commitment to the final polynomial f(x) = \sum_{j \in IDs} f_j(x)
size_t numBits; // the number of bits of all (w_n^id)^k exponents
public:
/**
* A player in a Feldman DKG.
*/
FeldmanPlayer(const DkgParams& params, const FeldmanPublicParameters& fpp, size_t id, bool isSimulated, bool isDkgPlayer)
: AbstractPlayer(params, id, isSimulated, isDkgPlayer), fpp(fpp), commFinal(params.t), numBits(0)
{
assertInclusiveRange(0, id, params.n - 1);
// get ((w_N^id)^k)_{k=0}^{t-1} exponents to verify the shares against the Feldman commitment
std::vector<Fr> exps;
fpp.getPlayerExps(id, exps);
for(size_t i = 0; i <= params.t - 1; i++) {
numBits += exps[i].as_bigint().num_bits();
}
}
public:
/**
* Evaluates f_id(x) at the first n Nth roots of unity, computing the shares
* for each player j.
*/
virtual void evaluate() {
libpolycrypto::poly_fft(f_id, params.N, shares);
shares.resize(params.n);
}
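// Illustrative note: poly_fft evaluates f_id at all N Nth roots of unity in
// O(N log N), so after the resize, shares[j] = f_id(w_N^j) for the first n
// players, matching the exponents produced by getPlayerExps().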
/**
* Feldman commits to the polynomial f_id(x).
*/
void dealImpl() {
// NOTE: shares[i] are already precomputed in AbstractPlayer by now
// commit to polynomial using Feldman commitments
// i.e., given c_i's, return g^{c_i}'s
comm.resize(f_id.size());
#ifdef USE_MULTITHREADING
#pragma omp parallel for
#endif
for(size_t i = 0; i < f_id.size(); i++) {
comm[i] = f_id[i] * G1::one();
}
}
/**
* Picks Feldman commitment randomly.
*/
virtual void simulatedDealImpl() {
dealImpl();
}
/**
* Verifies this player j's f_i(j) share against f_i(x), for all i
*/
bool verifySharesIndividually(const std::vector<AbstractPlayer*>& allPlayers) {
std::vector<Fr> exps;
for(size_t i = 0; i < allPlayers.size(); i++) {
const FeldmanPlayer& p = *dynamic_cast<FeldmanPlayer*>(allPlayers[i]);
// do not verify your own share, when used as DKG
if(isDkgPlayer && p.id == id) {
continue;
}
// verify your share of f_i(x) against player i's commitment of f_i(x)
assertNotEqual(p.shares[id], Fr::zero());
G1 shareVk = p.shares[id] * G1::one();
fpp.getPlayerExps(id, exps);
G1 result = multiExp<G1>(p.comm, exps);
if(shareVk != result) {
logerror << "The share from player #" << p.id << " did not verify against the Feldman commitment" << endl;
return false;
}
}
return true;
}
/**
* Verifies each player i's share against the final polynomial f(x).
*/
bool verifySharesWorstCaseReconstruction() {
std::vector<Fr> exps;
for(size_t pid = 0; pid < params.n; pid++) {
// verify player i's share against the final commitment of f(x)
G1 shareVk = shares[pid] * G1::one();
fpp.getPlayerExps(pid, exps);
G1 result = multiExp<G1>(comm, exps);
if(shareVk != result) {
logerror << "The share from player #" << pid << " did not verify against the Feldman commitment" << endl;
return false;
}
}
return true;
}
/**
* PERF: The performance of this call might depend on the signer ID of the current player.
* This is because the exponents used to verify the share might be smaller when the signer ID
* is smaller. When using roots of unity, this will only be true for player 0 with ID w_N^0 = 1.
* Even so, we still notice that exponentiation by roots of unity is much faster than by random exponents.
* See bench/BenchMultiexp.cpp
*/
virtual bool verifyOtherPlayers(const std::vector<AbstractPlayer*>& allPlayers, bool aggregate) {
pk = G1::zero();
share = Fr::zero();
commFinal.clear();
commFinal.resize(params.t, G1::zero());
// compute your final share f(id) of f(x) = \sum_{i \in IDs} f_i(x), the final public key g^{f(0)} and final commitment
if(isDkgPlayer) {
for(size_t i = 0; i < allPlayers.size(); i++) {
const FeldmanPlayer& p = *dynamic_cast<FeldmanPlayer*>(allPlayers[i]);
share = share + p.shares[id];
pk = pk + p.comm[0];
for(size_t j = 0; j < f_id.size(); j++) {
commFinal[j] = commFinal[j] + p.comm[j];
}
}
} else {
assertEqual(allPlayers.size(), 1); // VSS players only verify the dealer
}
// verify share from each player i individually, or verify aggregate shares
if(aggregate) {
if(isDkgPlayer) {
return verifyFinalShareProof();
} else {
throw std::runtime_error("What are you aggregating for if you're doing VSS?");
}
} else {
return verifySharesIndividually(allPlayers);
}
}
virtual bool reconstructionVerify(const std::vector<size_t>& subset, bool fastTrack) {
(void)subset;
assertEqual(subset.size(), params.t);
if(!fastTrack) {
// verify all $n$ shares manually to simulate finding good subset of shares
return verifySharesWorstCaseReconstruction();
}
// Feldman fast-track requires no verification
return true;
}
/**
* NOTE: Should only be called after verifyOtherPlayers has been called on all players.
*
* Verifies this player's share of the final agreed-upon secret f(0) against the final commitment to the polynomial f(x) \sum_{j \in IDs} f_j(x)
*/
virtual bool verifyFinalShareProof() {
assertNotEqual(share, Fr::zero());
std::vector<Fr> exps;
G1 shareVk = share * G1::one();
fpp.getPlayerExps(id, exps);
G1 result = multiExp<G1>(commFinal, exps);
if(shareVk != result) {
logerror << "Feldman player " << id << "'s aggregated share did not verify against the aggregated Feldman commitment" << endl;
return false;
}
return true;
}
};
}
|
main_simulator.c | #include "alg/grid/grid.h"
#include "ini_parser/ini.h"
#include "monodomain/monodomain_solver.h"
#include "monodomain/ode_solver.h"
#include "string/sds.h"
#include "utils/file_utils.h"
#include <string.h>
#include "config_helpers/config_helpers.h"
#ifdef COMPILE_OPENGL
#include "draw/draw.h"
#endif
void configure_simulation(int argc, char **argv, struct user_options **options, struct monodomain_solver **monodomain_solver, struct ode_solver **ode_solver, struct grid **the_grid) {
*options = new_user_options();
*the_grid = new_grid();
*monodomain_solver = new_monodomain_solver();
*ode_solver = new_ode_solver();
// First we have to get the config file path
get_config_file(argc, argv, *options);
if((*(options))->config_file) {
// Here we parse the config file
if(ini_parse((*(options))->config_file, parse_config_file, *options) < 0) {
fprintf(stderr, "Error: Can't load the config file %s\n", (*(options))->config_file);
exit(EXIT_FAILURE);
}
}
else {
fprintf(stderr, "\nError: The config file is mandatory.\n\n");
display_usage(argv);
exit(EXIT_FAILURE);
}
// The command line options always overwrite the config file
parse_options(argc, argv, *options);
//This variable is from file_utils.h
no_stdout = (*(options))->quiet;
// Create the output dir and the logfile
if((*(options))->save_mesh_config) {
char *out_dir_name = NULL;
GET_PARAMETER_VALUE_CHAR_OR_USE_DEFAULT(out_dir_name, (*(options))->save_mesh_config->config_data, "output_dir");
if(out_dir_name) {
sds buffer_log = sdsnew("");
sds buffer_ini = sdsnew("");
bool remove_older_simulation_dir = false;
GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(remove_older_simulation_dir, (*(options))->save_mesh_config->config_data, "remove_older_simulation");
if (remove_older_simulation_dir) {
remove_directory(out_dir_name);
}
create_dir(out_dir_name);
buffer_log = sdscatfmt(buffer_log, "%s/outputlog.txt", out_dir_name);
open_logfile(buffer_log);
print_to_stdout_and_file("Command line to reproduce this simulation:\n");
for (int i = 0; i < argc; i++) {
print_to_stdout_and_file("%s ", argv[i]);
}
print_to_stdout_and_file("\n");
buffer_ini = sdscatfmt(buffer_ini, "%s/original_configuration.ini", out_dir_name);
print_to_stdout_and_file("For reproducibility purposes the configuration file was copied to file: %s\n",
buffer_ini);
cp_file(buffer_ini, (*(options))->config_file);
sdsfree(buffer_log);
sdsfree(buffer_ini);
}
}
configure_ode_solver_from_options(*ode_solver, *options);
configure_monodomain_solver_from_options(*monodomain_solver, *options);
configure_grid_from_options(*the_grid, *options);
}
void free_current_simulation_resources(struct user_options *options, struct monodomain_solver *monodomain_solver, struct ode_solver *ode_solver, struct grid *the_grid) {
clean_and_free_grid(the_grid);
free_ode_solver(ode_solver);
free(monodomain_solver);
free_user_options(options);
close_logfile();
}
#ifdef COMPILE_OPENGL
void init_draw_config(struct draw_config *draw_config, struct user_options *options) {
draw_config->config_name = strdup(options->config_file);
draw_config->grid_info.grid_to_draw = NULL;
draw_config->max_v = options->max_v;
draw_config->min_v = options->min_v;
if(draw_config->min_v == 0) draw_config->min_v = 0.1f;
draw_config->simulating = false;
draw_config->time = 0.0;
draw_config->adaptive = options->adaptive;
draw_config->final_time = options->final_time;
draw_config->dt = options->dt_pde;
draw_config->exit = false;
draw_config->restart = false;
draw_config->draw_type = DRAW_SIMULATION;
draw_config->error_message = NULL;
draw_config->grid_info.loaded = false;
}
#endif
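/*
 * A minimal sketch (illustrative, not part of the simulator) of the
 * two-section OpenMP pattern used in main() below: one section runs the
 * visualization loop while the other runs the solver, and nested
 * parallelism is enabled so the solver section can spawn its own team.
 */
#if 0
#include <omp.h>
static void two_section_sketch(void) {
    omp_set_nested(1);
    #pragma omp parallel sections num_threads(2)
    {
        #pragma omp section
        { /* draw loop: render until the solver signals completion */ }
        #pragma omp section
        { /* solver loop: may contain its own omp parallel regions */ }
    }
}
#endif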
int main(int argc, char **argv) {
struct user_options *options = NULL;
struct grid *the_grid;
struct monodomain_solver *monodomain_solver = NULL;
struct ode_solver *ode_solver = NULL;
configure_simulation(argc, argv, &options, &monodomain_solver, &ode_solver, &the_grid);
#ifndef COMPILE_CUDA
if(ode_solver->gpu) {
print_to_stdout_and_file("Cuda runtime not found in this system. Fallbacking to CPU solver!!\n");
ode_solver->gpu = false;
}
#endif
#ifndef COMPILE_OPENGL
if(options->draw) {
print_to_stdout_and_file("OpenGL not found. The output will not be draw!!\n");
options->draw = false;
}
#endif
int np = monodomain_solver->num_threads;
if(np == 0)
np = 1;
#if defined(_OPENMP)
omp_set_num_threads(np);
#endif
//If COMPILE_OPENGL is not set this is always false. See above.
if(options->draw) {
#ifdef COMPILE_OPENGL //If this is defined, OpenMP is also defined
omp_set_nested(true);
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
omp_init_lock(&draw_config.draw_lock);
omp_init_lock(&draw_config.sleep_lock);
init_draw_config(&draw_config, options);
init_and_open_visualization_window();
}
#pragma omp section
{
int result = solve_monodomain(monodomain_solver, ode_solver, the_grid, options);
while (result == RESTART_SIMULATION || result == SIMULATION_FINISHED) {
if(result == RESTART_SIMULATION) {
free_current_simulation_resources(options, monodomain_solver, ode_solver, the_grid);
configure_simulation(argc, argv, &options, &monodomain_solver, &ode_solver, &the_grid);
init_draw_config(&draw_config, options);
result = solve_monodomain(monodomain_solver, ode_solver, the_grid, options);
}
if(draw_config.restart) result = RESTART_SIMULATION;
if(draw_config.exit) {
free_current_simulation_resources(options, monodomain_solver, ode_solver, the_grid);
break;
}
}
}
}
#endif //COMPILE_OPENGL
} else {
solve_monodomain(monodomain_solver, ode_solver, the_grid, options);
free_current_simulation_resources(options, monodomain_solver, ode_solver, the_grid);
}
return EXIT_SUCCESS;
}
|
GB_binop__fmod_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp64)
// C=scalar+B GB (_bind1st__fmod_fp64)
// C=scalar+B' GB (_bind1st_tran__fmod_fp64)
// C=A+scalar GB (_bind2nd__fmod_fp64)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = fmod (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmod (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP64 || GxB_NO_FMOD_FP64)
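// Standalone illustration (not generated code) of the fmod semantics this
// kernel applies: the result keeps the sign of the first argument x in
// z = fmod (x,y).
#if 0
#include <math.h>
#include <stdio.h>
static int fmod_demo (void)
{
    printf ("%g\n", fmod ( 5.5,  2.0)) ;    // prints 1.5
    printf ("%g\n", fmod (-5.5,  2.0)) ;    // prints -1.5
    printf ("%g\n", fmod ( 5.5, -2.0)) ;    // prints 1.5
    return (0) ;
}
#endif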
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__fmod_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__fmod_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__fmod_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__fmod_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__fmod_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = fmod (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__fmod_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = fmod (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmod (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__fmod_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmod (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
const double alpha = nnvm::get<double>(attrs.parsed);
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
while (output_row < row_count) {
next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
: row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1]
!= row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
// All rows are stored, so we do not need to access row indices (and
// eventually we will not need any extra complexity to call GPU kernels)
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * row_count,
output_data.dptr_,
input_data.dptr_,
DType(alpha));
});
}
}
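/*
 * Worked example (illustrative): with row_count = 6 and stored sparse rows
 * {1, 2, 5}, the loop above emits a dense fill for row 0, one contiguous
 * sparse block for rows 1-2, a dense fill for rows 3-4, and a final sparse
 * block for row 5, so each kernel launch covers as many contiguous rows as
 * possible.
 */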
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
CHECK_EQ(output.shape(), input.shape());
const double alpha = nnvm::get<double>(attrs.parsed);
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType *in = input.data().dptr<DType>();
const IType *column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType *row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row = !last_row
? static_cast<size_t>(row_starts_ptr[i + 1])
- row_item_start_iter
: item_count - row_item_start_iter;
if (input_items_this_row) {
const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType *row_data_start = in + row_item_start_iter;
DType *output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
template<typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray output) {
mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
template<typename OP>
static void Compute_(const nnvm::NodeAttrs &attrs,
mshadow::Stream<cpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
#if MXNET_USE_CUDA
template<typename OP>
static void Compute_(const nnvm::NodeAttrs &attrs,
mshadow::Stream<gpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs);
#endif
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
Compute_<OP>(attrs, s, inputs, req, outputs);
}
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename OP>
static void Backward_(const nnvm::NodeAttrs &attrs,
mshadow::Stream<cpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, cpu>::
Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
#if MXNET_USE_CUDA
template<typename OP>
static void Backward_(const nnvm::NodeAttrs &attrs,
mshadow::Stream<gpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs);
#endif
template<typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
Backward_<OP>(attrs, s, inputs, req, outputs);
}
};
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
attrs->parsed = std::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_argument("scalar", "float", "scalar input")
} // namespace op
} // namespace mxnet
#ifdef __CUDACC__
#include "elemwise_binary_scalar_op.cuh"
#endif
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
DES_bs_b.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2003,2010-2013,2015 by Solar Designer
*
* Addition of single DES encryption with no salt by
* Deepika Dutta Mishra <dipikadutta at gmail.com> in 2012, no
* rights reserved.
*/
#ifdef _MSC_VER
#undef _OPENMP
#endif
#include "arch.h"
#include "common.h"
#include "DES_bs.h"
#include "memdbg.h"
#if DES_BS_ASM && defined(_OPENMP) && defined(__GNUC__)
#warning Assembly code and OpenMP are both requested - will provide the former, but not the latter (for DES-based hashes). This may likely be corrected by enabling SIMD intrinsics with the C compiler (try adding -msse2 to OMPFLAGS).
#endif
#if !DES_BS_ASM
#define vzero (*(vtype *)&DES_bs_all.zero)
#if DES_bs_mt
#define vones (*(vtype *)&DES_bs_all_by_tnum(-1).ones)
#else
#define vones (*(vtype *)&DES_bs_all.ones)
#endif
#define DES_BS_VECTOR_LOOPS 0
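/*
 * A minimal sketch (not part of John the Ripper) of the bitwise select that
 * the vsel macros below implement for each backend: dst takes a bit from b
 * where the mask c is set and from a elsewhere.
 */
#if 0
static inline unsigned long vsel_sketch(unsigned long a, unsigned long b,
	unsigned long c)
{
	return (a & ~c) ^ (b & c);
}
#endif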
#if defined(__ARM_NEON) && DES_BS_DEPTH == 64
#include <arm_neon.h>
typedef uint32x2_t vtype;
#define vst(dst, ofs, src) \
vst1_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veor_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvn_u32((a))
#define vand(dst, a, b) \
(dst) = vand_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorr_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbic_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbsl_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vadd_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshl_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshr_n_u32((src), (shift))
#elif defined(__ARM_NEON) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <arm_neon.h>
typedef struct {
uint32x2_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 && defined(DES_BS_2X64)
#include <arm_neon.h>
typedef struct {
uint32x2_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = veor_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = vmvn_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = vand_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = vorr_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = vbic_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = vbsl_u32((c).g, (b).g, (a).g)
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 128
#include <arm_neon.h>
typedef uint32x4_t vtype;
#define vst(dst, ofs, src) \
vst1q_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veorq_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvnq_u32((a))
#define vand(dst, a, b) \
(dst) = vandq_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorrq_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbicq_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbslq_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vaddq_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshlq_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshrq_n_u32((src), (shift))
#elif defined(__ARM_NEON) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <arm_neon.h>
typedef struct {
uint32x4_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 256
#include <arm_neon.h>
typedef struct {
uint32x4_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = veorq_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = vmvnq_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = vandq_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = vorrq_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = vbicq_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = vbslq_u32((c).g, (b).g, (a).g)
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 128
#ifdef __linux__
#include <altivec.h>
#endif
typedef vector signed int vtype;
#define vst(dst, ofs, src) \
vec_st((src), (ofs) * sizeof(DES_bs_vector), (vtype *)&(dst))
#define vxorf(a, b) \
vec_xor((a), (b))
#define vnot(dst, a) \
(dst) = vec_nor((a), (a))
#define vand(dst, a, b) \
(dst) = vec_and((a), (b))
#define vor(dst, a, b) \
(dst) = vec_or((a), (b))
#define vandn(dst, a, b) \
(dst) = vec_andc((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vec_sel((a), (b), (vector bool int)(c))
#elif defined(__ALTIVEC__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), &((vtype *)&(dst))->f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 256
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f, g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), &((vtype *)&(dst))->f); \
vec_st((src).g, (ofs) * sizeof(DES_bs_vector), &((vtype *)&(dst))->g)
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = vec_xor((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = vec_nor((a).g, (a).g)
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = vec_and((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = vec_or((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = vec_andc((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = vec_sel((a).g, (b).g, (vector bool int)(c).g)
#elif defined(__MIC__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef __m512i vtype;
#define vst(dst, ofs, src) \
_mm512_store_epi32((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm512_xor_epi32((a), (b))
#define vand(dst, a, b) \
(dst) = _mm512_and_epi32((a), (b))
#define vor(dst, a, b) \
(dst) = _mm512_or_epi32((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm512_andnot_epi32((b), (a))
#define vshl1(dst, src) \
(dst) = _mm512_add_epi32((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm512_slli_epi32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm512_srli_epi32((src), (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 256 && !defined(DES_BS_NO_AVX256)
#include <immintrin.h>
typedef __m256i vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm256_xor_si256((a), (b))
#define vand(dst, a, b) \
(dst) = _mm256_and_si256((a), (b))
#define vor(dst, a, b) \
(dst) = _mm256_or_si256((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm256_andnot_si256((b), (a))
#ifdef __XOP__
/* This could be _mm256_cmov_si256(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst) = __builtin_ia32_vpcmov_v8sf256((b), (a), (c))
#endif
#define vshl1(dst, src) \
(dst) = _mm256_add_epi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm256_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm256_srli_epi64((src), (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 384 && !defined(DES_BS_NO_AVX128)
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
typedef struct {
__m256i f;
__m128i g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef struct {
__m256i f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm256_xor_si256((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm256_and_si256((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm256_or_si256((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm256_andnot_si256((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = __builtin_ia32_vpcmov_v8sf256((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm256_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm256_srli_epi64((src).g, (shift))
#elif defined(__AVX__) && defined(__MMX__) && DES_BS_DEPTH == 320 && \
!defined(DES_BS_NO_MMX)
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__AVX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 320) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 288))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm256_xor_si256((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__AVX__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 384) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 352))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm256_xor_si256((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__SSE2__) && DES_BS_DEPTH == 128
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef __m128i vtype;
#define vst(dst, ofs, src) \
_mm_store_si128((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm_xor_si128((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si128((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si128((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si128((b), (a))
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst) = _mm_cmov_si128((b), (a), (c))
#else
#define vsel(dst, a, b, c) \
(dst) = _mm_xor_si128(_mm_andnot_si128((c), (a)), \
_mm_and_si128((c), (b)))
#endif
#define vshl1(dst, src) \
(dst) = _mm_add_epi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_epi64((src), (shift))
#elif defined(__SSE2__) && DES_BS_DEPTH == 256 && defined(DES_BS_NO_MMX)
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef struct {
__m128i f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst).f = _mm_cmov_si128((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_epi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__SSE2__) && defined(__MMX__) && DES_BS_DEPTH == 192 && \
!defined(DES_BS_NO_MMX)
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__SSE2__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <emmintrin.h>
typedef struct {
__m128i f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__SSE2__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 256) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 224))
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g); \
(dst).h = (src).h << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__MMX__) && ARCH_BITS != 64 && DES_BS_DEPTH == 64
#include <mmintrin.h>
typedef __m64 vtype;
#define vxorf(a, b) \
_mm_xor_si64((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si64((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si64((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si64((b), (a))
#define vshl1(dst, src) \
(dst) = _mm_add_pi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_si64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_si64((src), (shift))
#elif defined(__MMX__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <mmintrin.h>
typedef struct {
__m64 f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f = (src).f; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si64((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si64((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si64((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si64((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si64((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_pi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_si64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_si64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#else
#if DES_BS_VECTOR
#undef DES_BS_VECTOR_LOOPS
#define DES_BS_VECTOR_LOOPS 1
#endif
typedef unsigned ARCH_WORD vtype;
#define vxorf(a, b) \
((a) ^ (b))
#define vnot(dst, a) \
(dst) = ~(a)
#define vand(dst, a, b) \
(dst) = (a) & (b)
#define vor(dst, a, b) \
(dst) = (a) | (b)
#define vandn(dst, a, b) \
(dst) = (a) & ~(b)
#define vsel(dst, a, b, c) \
(dst) = (((a) & ~(c)) ^ ((b) & (c)))
#define vshl(dst, src, shift) \
(dst) = (src) << (shift)
#define vshr(dst, src, shift) \
(dst) = (src) >> (shift)
/* Assume that 0 always fits in one load immediate instruction */
#undef vzero
#define vzero 0
/* Archs friendly to use of immediate values */
#if defined(__x86_64__) || defined(__i386__)
#undef vones
#define vones (~(vtype)0)
#endif
#endif
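/*
 * Note on the generic fallback above: vsel() is a bitwise multiplexer
 * ("cmov"): each result bit is taken from b where the condition c has a 1
 * bit and from a where c has a 0 bit.  For example, with a = 0x00,
 * b = 0xff, c = 0x0f, the result is 0x0f.  The bitslice S-boxes use this
 * select semantic where available.
 */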
#ifndef vst
#define vst(dst, ofs, src) \
*((vtype *)((DES_bs_vector *)&(dst) + (ofs))) = (src)
#endif
#if !defined(vxor) && defined(vxorf)
#define vxor(dst, a, b) \
(dst) = vxorf((a), (b))
#endif
#if !defined(vxorf) && defined(vxor)
/*
* This requires gcc's "Statement Exprs" extension (also supported by a number
* of other C compilers).
*/
#define vxorf(a, b) \
({ vtype tmp; vxor(tmp, (a), (b)); tmp; })
#endif
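/*
 * The two adapters above let the remainder of this file use both the
 * expression form (vxorf) and the statement form (vxor), regardless of
 * which of the two a given architecture block defined natively.
 */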
#ifndef vnot
#define vnot(dst, a) \
vxor((dst), (a), vones)
#endif
#ifndef vshl1
#define vshl1(dst, src) \
vshl((dst), (src), 1)
#endif
#if !DES_BS_VECTOR_LOOPS && defined(vshl) && defined(vshr)
#define DES_BS_VECTOR_LOOPS_K 0
#define DEPTH_K
#define for_each_depth_k()
#define kvtype vtype
#define kvand vand
#define kvor vor
#define kvshl1 vshl1
#define kvshl vshl
#define kvshr vshr
#else
#if DES_BS_VECTOR
#define DES_BS_VECTOR_LOOPS_K 1
#define DEPTH_K [depth]
#define for_each_depth_k() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#define DES_BS_VECTOR_LOOPS_K 0
#endif
typedef unsigned ARCH_WORD kvtype;
#define kvand(dst, a, b) \
(dst) = (a) & (b)
#define kvor(dst, a, b) \
(dst) = (a) | (b)
#define kvshl1(dst, src) \
(dst) = (src) << 1
#define kvshl(dst, src, shift) \
(dst) = (src) << (shift)
#define kvshr(dst, src, shift) \
(dst) = (src) >> (shift)
#endif
#if !DES_BS_VECTOR || DES_BS_VECTOR_LOOPS_K
#ifdef __x86_64__
#define mask01 0x0101010101010101UL
#elif __i386__
#define mask01 0x01010101UL
#else
#undef mask01
#endif
#ifdef mask01
#define mask02 (mask01 << 1)
#define mask04 (mask01 << 2)
#define mask08 (mask01 << 3)
#define mask10 (mask01 << 4)
#define mask20 (mask01 << 5)
#define mask40 (mask01 << 6)
#define mask80 (mask01 << 7)
#endif
#endif
#ifndef mask01
#define mask01 (*(kvtype *)&DES_bs_all.masks[0])
#define mask02 (*(kvtype *)&DES_bs_all.masks[1])
#define mask04 (*(kvtype *)&DES_bs_all.masks[2])
#define mask08 (*(kvtype *)&DES_bs_all.masks[3])
#define mask10 (*(kvtype *)&DES_bs_all.masks[4])
#define mask20 (*(kvtype *)&DES_bs_all.masks[5])
#define mask40 (*(kvtype *)&DES_bs_all.masks[6])
#define mask80 (*(kvtype *)&DES_bs_all.masks[7])
#endif
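/*
 * mask01 has the 0x01 bit set in every byte of a word, so (v & mask01)
 * isolates bit 0 of each packed byte; mask02 through mask80 do the same
 * for bits 1 through 7.  On x86 the masks are compile-time immediates;
 * otherwise they are loaded from DES_bs_all.masks[].
 */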
#ifdef __i386__
/* register-starved */
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v4 = *(kvtype *)&vp[4];
#define v1 *(kvtype *)&vp[1]
#define v2 *(kvtype *)&vp[2]
#define v3 *(kvtype *)&vp[3]
#define v5 *(kvtype *)&vp[5]
#define v6 *(kvtype *)&vp[6]
#define v7 *(kvtype *)&vp[7]
#else
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v1 = *(kvtype *)&vp[1]; \
kvtype v2 = *(kvtype *)&vp[2]; \
kvtype v3 = *(kvtype *)&vp[3]; \
kvtype v4 = *(kvtype *)&vp[4]; \
kvtype v5 = *(kvtype *)&vp[5]; \
kvtype v6 = *(kvtype *)&vp[6]; \
kvtype v7 = *(kvtype *)&vp[7];
#endif
#define kvand_shl1_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(tmp, tmp); \
kvor(dst, dst, tmp)
#define kvand_shl_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshl(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shl1(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(dst, tmp)
#define kvand_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvor(dst, dst, tmp)
#define kvand_shr_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shr(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(dst, tmp, shift)
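/*
 * The compound macros above all follow one pattern: isolate a single bit
 * plane with kvand(), align it with a shift, and accumulate it into dst
 * with kvor().  Each expects a kvtype variable named tmp in scope at the
 * point of use.
 */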
#define FINALIZE_NEXT_KEY_BIT_0 { \
kvtype m = mask01, va, vb, tmp; \
kvand(va, v0, m); \
kvand_shl1(vb, v1, m); \
kvand_shl_or(va, v2, m, 2); \
kvand_shl_or(vb, v3, m, 3); \
kvand_shl_or(va, v4, m, 4); \
kvand_shl_or(vb, v5, m, 5); \
kvand_shl_or(va, v6, m, 6); \
kvand_shl_or(vb, v7, m, 7); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
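/*
 * FINALIZE_NEXT_KEY_BIT_n (above and below) transposes one bit plane:
 * bit n of each loaded word v0..v7 is gathered into the output word at
 * *kp, with the contribution of v_i landing at bit position i within each
 * byte.  Accumulating into two variables (va, vb) gives the compiler two
 * independent dependency chains to schedule.
 */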
#define FINALIZE_NEXT_KEY_BIT_1 { \
kvtype m = mask02, va, vb, tmp; \
kvand_shr(va, v0, m, 1); \
kvand(vb, v1, m); \
kvand_shl1_or(va, v2, m); \
kvand_shl_or(vb, v3, m, 2); \
kvand_shl_or(va, v4, m, 3); \
kvand_shl_or(vb, v5, m, 4); \
kvand_shl_or(va, v6, m, 5); \
kvand_shl_or(vb, v7, m, 6); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_2 { \
kvtype m = mask04, va, vb, tmp; \
kvand_shr(va, v0, m, 2); \
kvand_shr(vb, v1, m, 1); \
kvand_or(va, v2, m); \
kvand_shl1_or(vb, v3, m); \
kvand_shl_or(va, v4, m, 2); \
kvand_shl_or(vb, v5, m, 3); \
kvand_shl_or(va, v6, m, 4); \
kvand_shl_or(vb, v7, m, 5); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_3 { \
kvtype m = mask08, va, vb, tmp; \
kvand_shr(va, v0, m, 3); \
kvand_shr(vb, v1, m, 2); \
kvand_shr_or(va, v2, m, 1); \
kvand_or(vb, v3, m); \
kvand_shl1_or(va, v4, m); \
kvand_shl_or(vb, v5, m, 2); \
kvand_shl_or(va, v6, m, 3); \
kvand_shl_or(vb, v7, m, 4); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_4 { \
kvtype m = mask10, va, vb, tmp; \
kvand_shr(va, v0, m, 4); \
kvand_shr(vb, v1, m, 3); \
kvand_shr_or(va, v2, m, 2); \
kvand_shr_or(vb, v3, m, 1); \
kvand_or(va, v4, m); \
kvand_shl1_or(vb, v5, m); \
kvand_shl_or(va, v6, m, 2); \
kvand_shl_or(vb, v7, m, 3); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_5 { \
kvtype m = mask20, va, vb, tmp; \
kvand_shr(va, v0, m, 5); \
kvand_shr(vb, v1, m, 4); \
kvand_shr_or(va, v2, m, 3); \
kvand_shr_or(vb, v3, m, 2); \
kvand_shr_or(va, v4, m, 1); \
kvand_or(vb, v5, m); \
kvand_shl1_or(va, v6, m); \
kvand_shl_or(vb, v7, m, 2); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_6 { \
kvtype m = mask40, va, vb, tmp; \
kvand_shr(va, v0, m, 6); \
kvand_shr(vb, v1, m, 5); \
kvand_shr_or(va, v2, m, 4); \
kvand_shr_or(vb, v3, m, 3); \
kvand_shr_or(va, v4, m, 2); \
kvand_shr_or(vb, v5, m, 1); \
kvand_or(va, v6, m); \
kvand_shl1_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_7 { \
kvtype m = mask80, va, vb, tmp; \
kvand_shr(va, v0, m, 7); \
kvand_shr(vb, v1, m, 6); \
kvand_shr_or(va, v2, m, 5); \
kvand_shr_or(vb, v3, m, 4); \
kvand_shr_or(va, v4, m, 3); \
kvand_shr_or(vb, v5, m, 2); \
kvand_shr_or(va, v6, m, 1); \
kvand_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
#if DES_BS_EXPAND
{
int index;
for (index = 0; index < 0x300; index++)
for_each_depth_k() {
#if DES_BS_VECTOR_LOOPS_K
DES_bs_all.KS.v[index] DEPTH_K =
DES_bs_all.KSp[index] DEPTH_K;
#else
vst(*(kvtype *)&DES_bs_all.KS.v[index], 0,
*(kvtype *)DES_bs_all.KSp[index]);
#endif
}
}
#endif
}
#endif
#if DES_bs_mt
MAYBE_INLINE void DES_bs_set_salt_for_thread(int t, unsigned int salt)
#else
void DES_bs_set_salt(ARCH_WORD salt)
#endif
{
unsigned int new = salt;
unsigned int old = DES_bs_all.salt;
int dst;
DES_bs_all.salt = new;
for (dst = 0; dst < 24; dst++) {
if ((new ^ old) & 1) {
DES_bs_vector *sp1, *sp2;
int src1 = dst;
int src2 = dst + 24;
if (new & 1) {
src1 = src2;
src2 = dst;
}
sp1 = DES_bs_all.Ens[src1];
sp2 = DES_bs_all.Ens[src2];
DES_bs_all.E.E[dst] = (ARCH_WORD *)sp1;
DES_bs_all.E.E[dst + 24] = (ARCH_WORD *)sp2;
DES_bs_all.E.E[dst + 48] = (ARCH_WORD *)(sp1 + 32);
DES_bs_all.E.E[dst + 72] = (ARCH_WORD *)(sp2 + 32);
}
new >>= 1;
old >>= 1;
if (new == old)
break;
}
}
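/*
 * Each of the 24 low salt bits selects whether expansion entries dst and
 * dst + 24 are swapped, which is how the crypt(3) salt perturbs the DES
 * E-box.  The loop exits early once the remaining bits of the old and new
 * salt agree, so switching between similar salts is cheap.
 */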
#if !DES_BS_ASM
/* Include the S-boxes here so that the compiler can inline them */
#if DES_BS == 3
#include "sboxes-s.c"
#elif DES_BS == 2
#include "sboxes.c"
#else
#undef andn
#include "nonstd.c"
#endif
#define b DES_bs_all.B
#define e DES_bs_all.E.E
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#define bd [depth]
#define ed [depth]
#define DEPTH [depth]
#define for_each_depth() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#if DES_BS_EXPAND
#define kd
#else
#define kd [0]
#endif
#define bd
#define ed [0]
#define DEPTH
#define for_each_depth()
#endif
#define DES_bs_clear_block_8(i) \
for_each_depth() { \
vst(b[i] bd, 0, zero); \
vst(b[i] bd, 1, zero); \
vst(b[i] bd, 2, zero); \
vst(b[i] bd, 3, zero); \
vst(b[i] bd, 4, zero); \
vst(b[i] bd, 5, zero); \
vst(b[i] bd, 6, zero); \
vst(b[i] bd, 7, zero); \
}
#define DES_bs_clear_block \
DES_bs_clear_block_8(0); \
DES_bs_clear_block_8(8); \
DES_bs_clear_block_8(16); \
DES_bs_clear_block_8(24); \
DES_bs_clear_block_8(32); \
DES_bs_clear_block_8(40); \
DES_bs_clear_block_8(48); \
DES_bs_clear_block_8(56);
#define DES_bs_set_block_8(i, v0, v1, v2, v3, v4, v5, v6, v7) \
for_each_depth() { \
vst(b[i] bd, 0, v0); \
vst(b[i] bd, 1, v1); \
vst(b[i] bd, 2, v2); \
vst(b[i] bd, 3, v3); \
vst(b[i] bd, 4, v4); \
vst(b[i] bd, 5, v5); \
vst(b[i] bd, 6, v6); \
vst(b[i] bd, 7, v7); \
}
#define x(p) vxorf(*(vtype *)&e[p] ed, *(vtype *)&k[p] kd)
#define y(p, q) vxorf(*(vtype *)&b[p] bd, *(vtype *)&k[q] kd)
#define z(r) ((vtype *)&b[r] bd)
void DES_bs_crypt_25(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = 25;
start:
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
if (rounds_and_swapped == 0x100) goto next;
swap:
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
iterations--;
goto start;
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
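/*
 * The 25 outer iterations of 16 rounds each match traditional DES-based
 * crypt(3), which applies DES 25 times to an all-zero block.
 * rounds_and_swapped packs a round-pair counter into its low bits and
 * uses the 0x100 bit to track the swapped-halves phase between
 * iterations.
 */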
void DES_bs_crypt(int count, int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = count;
start:
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(x(12), x(13), x(14), x(15), x(16), x(17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(x(18), x(19), x(20), x(21), x(22), x(23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(x(36), x(37), x(38), x(39), x(40), x(41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(x(42), x(43), x(44), x(45), x(46), x(47),
z(36), z(58), z(46), z(52));
if (rounds_and_swapped == 0x100) goto next;
swap:
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(x(60), x(61), x(62), x(63), x(64), x(65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(x(66), x(67), x(68), x(69), x(70), x(71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(x(84), x(85), x(86), x(87), x(88), x(89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(x(90), x(91), x(92), x(93), x(94), x(95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
if (--iterations) goto start;
#if DES_bs_mt
continue;
#else
return;
#endif
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
#undef x
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_LM(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_LM(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 7; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
FINALIZE_NEXT_KEY_BIT_7
}
}
}
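/*
 * Unlike the crypt(3) key setup above (8 key bytes contributing 7 bits
 * each, with parity dropped), LM keys take all 8 bits from 7 key bytes;
 * hence ic < 7 and the additional FINALIZE_NEXT_KEY_BIT_7 here.
 */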
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
int DES_bs_crypt_LM(int *pcount, struct db_salt *salt)
{
int keys_count = *pcount;
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
{
vtype z = vzero, o = vones;
DES_bs_set_block_8(0, z, z, z, z, z, z, z, z);
DES_bs_set_block_8(8, o, o, o, z, o, z, z, z);
DES_bs_set_block_8(16, z, z, z, z, z, z, z, o);
DES_bs_set_block_8(24, z, z, o, z, z, o, o, o);
DES_bs_set_block_8(32, z, z, z, o, z, o, o, o);
DES_bs_set_block_8(40, z, z, z, z, z, o, z, z);
DES_bs_set_block_8(48, o, o, z, z, z, z, o, z);
DES_bs_set_block_8(56, o, z, o, z, o, o, o, o);
}
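/*
 * The block of constants just set is, in effect, the bitsliced encoding
 * of the fixed LM plaintext "KGS!@#$%" (after the initial permutation):
 * every bit layer is all-zeros (z) or all-ones (o) because the same
 * plaintext is encrypted under every candidate key.
 */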
#if DES_bs_mt
DES_bs_finalize_keys_LM(t);
#else
DES_bs_finalize_keys_LM();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}
return keys_count;
}
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_plain(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_plain(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
}
#undef v1
#undef v2
#undef v3
#undef v5
#undef v6
#undef v7
/* Single DES encryption with no salt */
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
#if DES_BS_VECTOR
#define INDX [index]
#else
#define INDX
#endif
void DES_bs_crypt_plain(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count, DES_bs_P)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
int i;
#if DES_BS_VECTOR
int index;
#endif
for(i=0; i<64; i++)
{
#if DES_BS_VECTOR
for(index=0; index<DES_BS_VECTOR_SIZE; index++)
#endif
DES_bs_all.B[i]INDX = DES_bs_P[i]INDX;
}
#if DES_bs_mt
DES_bs_finalize_keys_plain(t);
#else
DES_bs_finalize_keys_plain();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}
}
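/*
 * DES_bs_crypt_plain() performs one 16-round DES encryption (8 passes of
 * two rounds) of the bitsliced plaintext previously expanded into DES_bs_P
 * by DES_bs_generate_plaintext().  No salt is applied: the y() macro reads
 * B[] directly rather than going through the salt-swapped E pointers used
 * by DES_bs_crypt_25().
 */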
#endif
#ifdef INDX
#undef INDX
#endif
#if DES_BS_VECTOR
#define INDX [k]
#else
#define INDX
#endif
void DES_bs_generate_plaintext(unsigned char *plaintext)
{
int i, j;
#if DES_BS_VECTOR
int k;
#endif
/* Set same plaintext for all bit layers */
for (i = 0; i < 64; i++) {
j = (int) (plaintext[i/8] >> (7-(i%8))) & 0x01;
if(j==1)
j = -1;
#if DES_BS_VECTOR
for(k=0; k<DES_BS_VECTOR_SIZE; k++)
#endif
DES_bs_P[i]INDX = j;
}
}
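/*
 * A plaintext bit of 1 becomes j = -1 (a word of all one bits) and a 0
 * stays an all-zero word, replicating each plaintext bit across every
 * position of its bit layer.  For example, plaintext[0] = 0x80 sets layer
 * 0 to ~0 and layers 1 through 7 to 0.
 */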
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
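/*
 * simde__m128_private is a single 16-byte storage type whose members view
 * the same bits as lanes of every supported element width, plus the native
 * backend register type (x86 __m128, NEON, WASM SIMD128, or AltiVec) when
 * one is available.  Conversions between the public and private types go
 * through simde_memcpy() below.
 */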
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
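/*
 * Round-tripping through simde_memcpy() rather than a pointer cast keeps
 * these conversions free of strict-aliasing violations; compilers normally
 * optimize the fixed-size 16-byte copy away entirely.
 */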
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128)
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
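/*
 * The fallback values mirror the MXCSR rounding-control field (bits 13-14):
 * SIMDE_MM_ROUND_DOWN is 0x2000 (bit 13), SIMDE_MM_ROUND_UP is 0x4000
 * (bit 14), and SIMDE_MM_ROUND_TOWARD_ZERO is both bits together.
 */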
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
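/* Illustrative sketch, not part of SIMDe: the usual save/set/restore
 * pattern for the portable rounding-mode accessors above. The function
 * name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
simde_x_example_rounding_mode_scope (void) {
unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
/* ... rounding-sensitive work would run here ... */
SIMDE_MM_SET_ROUNDING_MODE(saved);
return saved;
}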
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
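/* Note: unlike the real MXCSR, the emulated control/status word above
 * round-trips only the rounding-control bits on non-x86 targets;
 * exception flags/masks and the flush-to-zero bit are not modelled. */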
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
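/* Illustrative sketch, not part of SIMDe: truncating every lane with the
 * portable wrapper above. The function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_example_trunc_ps (simde__m128 a) {
return simde_mm_round_ps(a, SIMDE_MM_FROUND_TO_ZERO);
}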
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
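/* Illustrative sketch, not part of SIMDe: with an all-zeros/all-ones mask
 * the blend above reduces to the classic bitwise select
 * (a AND NOT mask) OR (b AND mask), built here from the helpers defined
 * earlier in this file. The function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_example_select_bitwise (simde__m128 a, simde__m128 b, simde__m128 mask) {
return simde_mm_or_ps(simde_mm_andnot_ps(mask, a), simde_mm_and_ps(mask, b));
}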
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
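/* Sketch of the widening trick used above: in a wider type,
 * (a + b + 1) >> 1 is a rounding-up average that cannot overflow, e.g.
 * (65535 + 1 + 1) >> 1 == 32768. Scalar model for one lane; the function
 * name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_x_example_avg_u16 (uint16_t a, uint16_t b) {
return HEDLEY_STATIC_CAST(uint16_t,
(HEDLEY_STATIC_CAST(uint32_t, a) + HEDLEY_STATIC_CAST(uint32_t, b) + 1) >> 1);
}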
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
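/* Sketch: the mask built in the native path above clears the IEEE-754
 * sign bit, which is all fabsf() is at the bit level. Scalar model on the
 * raw bits; the function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_x_example_fabsf_bits (uint32_t bits) {
return bits & UINT32_C(0x7FFFFFFF);
}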
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
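/* Note: the _mm_cmpn*_{ps,ss} fall-backs below are implemented as the
 * inverse *ordered* comparisons. For ordinary values this matches the
 * native instructions, but the native "not" predicates are unordered
 * (all-ones when either operand is NaN) whereas these return zero in
 * that case. */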
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON does not have an ordered-compare builtin. Compare
a == a and b == b to detect NaNs, then AND the two results to
produce the final ordered mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
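/* Sketch: "ordered" simply means neither operand is NaN. Since x == x is
 * false only for NaN, (a == a) AND (b == b) is exactly the per-lane
 * predicate computed above. Scalar model; the function name is
 * hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_x_example_ordered_f32 (simde_float32 a, simde_float32 b) {
return (a == a) && (b == b);
}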
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
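/* Sketch: the SIMDE_IEEE754_STORAGE path above is the branch-free bit
 * trick r = dest ^ ((dest ^ src) & sign_mask). Scalar model on the raw
 * bits; the function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_x_example_copysignf_bits (uint32_t dest_bits, uint32_t src_bits) {
return dest_bits ^ ((dest_bits ^ src_bits) & UINT32_C(0x80000000));
}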
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
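/* Sketch: without SIMDE_FAST_CONVERSION_RANGE the conversions above mimic
 * x86, which produces INT32_MIN (the "integer indefinite" value) for NaN
 * and out-of-range inputs. Scalar model of the range check, assuming any
 * rounding has already been applied; the function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_x_example_cvt_f32_i32 (simde_float32 v) {
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
}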
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
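/* Sketch of the clamp-then-round step above for a single lane: saturate
 * to the i8 range before converting so out-of-range floats cannot invoke
 * undefined behaviour. The function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_x_example_f32_to_i8_sat (simde_float32 v) {
if (v > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) return INT8_MAX;
if (v < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) return INT8_MIN;
return SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(v));
}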
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument,
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function (a workaround
sketch follows simde_mm_loadl_pi below).
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
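/* A hedged workaround for the -Wcast-align issue described above: copy
   the two floats through a properly-typed simde__m64 instead of casting
   the pointer. Illustrative sketch only.

     simde_float32 lo[2] = { 1.0f, 2.0f };
     simde__m64 tmp;
     simde_memcpy(&tmp, lo, sizeof(tmp));   // avoids the aligned-cast warning
     simde__m128 v = simde_mm_loadl_pi(simde_mm_setzero_ps(), &tmp);
*/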
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
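/* The SIMDE_NATURAL_VECTOR_SIZE fallback above builds the lane-wise
   select (a < b) ? a : b out of bitwise ops: cmplt yields all-ones lanes
   where a < b, so (mask & a) | (~mask & b) picks a in those lanes and b
   elsewhere. Like the x86 instruction, it returns b when either input is
   NaN, since the compare is then false. */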
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
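/* Worked example (illustrative): movemask collects the sign bit of each
   lane into the low bits of the result, lane 0 in bit 0.

     simde__m128 v = simde_mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f);
     int m = simde_mm_movemask_ps(v);   // lanes 0 and 2 are negative
     // m == 0x5 (binary 0101)
*/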
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
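/* Worked example (illustrative): each lane keeps the high 16 bits of the
   32-bit unsigned product, e.g. 0x8000 * 0x8000 == 0x40000000, whose high
   half is 0x4000.

     simde__m64 r = simde_mm_mulhi_pu16(simde_mm_set1_pi16(INT16_MIN),
                                        simde_mm_set1_pi16(INT16_MIN));
     // every u16 lane == 0x4000
*/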
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
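/* The SIMDE_IEEE754_STORAGE path above uses the classic "magic constant"
   reciprocal: reinterpret the float bits, subtract from 0x7EF311C3 for a
   rough 1/x seed, then apply one Newton-Raphson step x*(2 - a*x). A
   scalar sketch of the same idea, assuming IEEE-754 binary32:

     static simde_float32 rcp_approx(simde_float32 a) {
       int32_t i;
       simde_float32 x;
       simde_memcpy(&i, &a, sizeof(i));   // view the float as raw bits
       i = INT32_C(0x7EF311C3) - i;       // linear estimate of 1/a
       simde_memcpy(&x, &i, sizeof(x));
       return x * (SIMDE_FLOAT32_C(2.0) - x * a);  // one refinement step
     }
*/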
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
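/* Note on the constants above: 0x5F37624F / 0x5F375A82 / 0x5F37599E are
   variants of the "fast inverse square root" magic number, and each
   refinement is Newton's method for f(x) = 1/x^2 - a, i.e.
   x' = x * (1.5 - 0.5*a*x*x); the 1.5008909 coefficient is a tuned
   replacement for 1.5 that reduces the worst-case error (see the Green
   reference above). */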
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
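/* Worked example (illustrative): the sum of absolute byte differences
   lands in the low 16-bit lane; the upper lanes are zero.

     simde__m64 r = simde_mm_sad_pu8(simde_mm_set1_pi8(10),
                                     simde_mm_set1_pi8(7));
     // low u16 lane == 8 * |10 - 7| == 24
*/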
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
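/* simde_x_mm_setone_ps relies on x == x being true for every non-NaN
   value: comparing zero with itself sets all 32 bits of every lane,
   yielding the all-ones pattern without loading a constant. */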
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
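/* Portable C has no store-only fence, so the fallbacks above issue a
   full barrier (or, for OpenMP, an empty critical section with its
   implied flushes); that is stronger and potentially slower than a real
   SFENCE, but never weaker. */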
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
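/* Worked example (illustrative): SIMDE_MM_SHUFFLE packs four 2-bit lane
   selectors, highest lane first, so SIMDE_MM_SHUFFLE(3, 2, 1, 0) == 0xE4
   (the identity shuffle) and SIMDE_MM_SHUFFLE(0, 1, 2, 3) == 0x1B (a full
   reversal). Given some simde__m128 v:

     simde__m128 rev = simde_mm_shuffle_ps(v, v, SIMDE_MM_SHUFFLE(0, 1, 2, 3));
*/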
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_ps(a, b, imm8) \
__extension__({ \
float32x4_t ret; \
ret = vmovq_n_f32( \
vgetq_lane_f32(a, (imm8) & (0x3))); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
ret, 1); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
ret, 2); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
ret, 3); \
})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
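/* No native sqrt on Armv7 NEON: iterate rsqrt refinements, then use
   sqrt(a) = a * rsqrt(a). */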
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
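/* The fenv save/restore above keeps _mm_ucomieq_ss quiet: a plain C
   comparison may raise FE_INVALID (e.g. for signaling NaNs), so
   feholdexcept() stashes the floating-point environment, the comparison
   runs, and fesetenv() restores the saved state so no spurious exception
   flag leaks out. The other ucomi* fallbacks below follow the same
   pattern. */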
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
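/* Usage sketch for SIMDE_MM_TRANSPOSE4_PS (illustrative, not part of the
 * original header): the macro transposes a 4x4 matrix held in four row
 * registers, in place. `r0`..`r3` below are assumed locals.
 *
 *   simde__m128 r0 = simde_mm_set_ps( 3.0f,  2.0f,  1.0f,  0.0f);
 *   simde__m128 r1 = simde_mm_set_ps( 7.0f,  6.0f,  5.0f,  4.0f);
 *   simde__m128 r2 = simde_mm_set_ps(11.0f, 10.0f,  9.0f,  8.0f);
 *   simde__m128 r3 = simde_mm_set_ps(15.0f, 14.0f, 13.0f, 12.0f);
 *   SIMDE_MM_TRANSPOSE4_PS(r0, r1, r2, r3);
 *   // r0 now holds the first column of the original matrix: {0, 4, 8, 12}.
 */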
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
parallel.c | /*
gcc -fopenmp -O2 src/parallel.c -o bin/parallel
export OMP_DYNAMIC=FALSE
export OMP_NUM_THREADS=8
$ ./bin/parallel 9
*/
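/*
 * Expected behavior (illustrative): each thread compares its id against the
 * command-line threshold. With OMP_NUM_THREADS=8 and an argument of 4,
 * threads 0-3 print "task 1" and threads 4-7 print "task 2"; the ordering
 * of the output lines is nondeterministic.
 */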
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int thread;
if(argc < 2) {
fprintf(stderr,"\nFalta no de thread \n");
exit(-1);
}
thread = atoi(argv[1]);
#pragma omp parallel
{
if ( omp_get_thread_num() < thread )
printf(" thread %d realiza la tarea 1\n", omp_get_thread_num());
else
printf(" thread %d realiza la tarea 2\n", omp_get_thread_num());
}
return(0);
} |
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include "../operator/mxnet_op.h"
namespace mxnet {
namespace common {
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0,
* and end with a value equal to the size of indices.
*/
struct csr_indptr_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
const nnvm::dim_t end, const nnvm::dim_t idx_size) {
if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
(i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const RType* indptr, const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i+1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 ||
(j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of the first dimension and in ascending order
*/
struct rsp_idx_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const nnvm::dim_t end, const nnvm::dim_t nrows) {
if ((i < end && idx[i+1] <= idx[i])
|| idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
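// Illustrative example (not part of the original header): for a 3x4 CSR
// matrix with 5 non-zeros, a layout that passes the checks above is
//   indptr  = {0, 2, 3, 5}    // starts at 0, non-decreasing, ends at nnz
//   indices = {0, 2, 1, 0, 3} // ascending within each row, each < ncols
// Any violation is reported through *out as kCSRIndPtrErr or kCSRIdxErr
// (kRSPIdxErr for the row-sparse check).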
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage)
<< "CheckFormatCSRImpl is for CSRNDArray";
const TShape shape = input.shape();
const TShape idx_shape = input.aux_shape(csr::kIdx);
const TShape indptr_shape = input.aux_shape(csr::kIndPtr);
const TShape storage_shape = input.storage_shape();
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) ||
(idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1, idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const TShape idx_shape = input.aux_shape(rowsparse::kIdx);
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
int stype = input.storage_type();
if (stype == kCSRStorage) {
CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kRowSparseStorage) {
CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kDefaultStorage) {
// no-op for default storage
} else {
LOG(FATAL) << "Unknown storage type " << stype;
}
}
/*! \brief Pick rows specified by the user-provided index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/*! \brief Casts the tensor storage type to the new type.
 */
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2`. Sets *has_both if both are found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
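// Usage sketch (illustrative): `in_stypes` is an assumed StorageTypeVector.
// The overload above reports whether every entry is csr or row_sparse, and
// sets has_both only when both kinds actually occur.
//
//   bool has_both = false;
//   if (ContainsOnlyStorage(in_stypes, kCSRStorage, kRowSparseStorage,
//                           &has_both) && has_both) {
//     // inputs mix csr and row_sparse storage
//   }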
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
return "unknown";
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
os << "operator = " << attrs.op->name
<< "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
std::string result = "";
std::vector<int> in_stypes;
std::vector<int> out_stypes;
in_stypes.reserve(inputs.size());
out_stypes.reserve(outputs.size());
auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
auto log_store = LogStore::Get();
if (log_store->find(message) == log_store->end()) {
LOG(INFO) << message;
log_store->insert(message);
}
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
}
// heuristic to determine the number of threads per GPU
inline int GetNumThreadsPerGPU() {
// This is the resource-efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get the number of matching colors.
// This decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
// This is the resource-efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadsPerGPU());
}
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
V sum = start;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < n; ++i) {
sum += a[i];
}
return sum;
}
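// Usage sketch (illustrative): the OpenMP reduction clause makes the
// per-thread partial sums safe to combine. `data` is an assumed local.
//
//   const float data[3] = {1.0f, 2.0f, 3.0f};
//   double total = ParallelAccumulate(data, 3, 0.0);  // total == 6.0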
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
size_t grainsize, const Compare& comp) {
if (len < grainsize) {
std::sort(first, first+len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
thr.join();
std::inplace_merge(first, first+len/2, first+len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two halves and two threads
 * will be assigned to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two halves and two threads
 * will be assigned to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(first, last, num_threads,
std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
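// Usage sketch (illustrative): sorts ascending with up to 4 threads. Ranges
// shorter than the grain size (at least 16K elements) are handled by a
// single std::sort call, so small inputs pay no threading cost.
//
//   std::vector<int> v = {5, 2, 9, 1};
//   ParallelSort(v.begin(), v.end(), 4);  // v == {1, 2, 5, 9}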
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
 * Construction of arrays of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
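// Usage sketch (illustrative, not part of the original header):
//
//   auto s = MakeUnique<std::string>(3, 'x');  // single object: "xxx"
//   auto a = MakeUnique<int[]>(8);             // value-initialized array of 8 ints
//   // MakeUnique<int[8]>(...) does not compile: the known-bound overload
//   // above is explicitly deleted.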
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask";
return nullptr;
}
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
pwsafe_fmt_plug.c | /* Password Safe and Password Gorilla cracker patch for JtR. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Optimization patch during January of 2013 by Brian Wallace <brian.wallace9809 at gmail.com>.
*
* This software is Copyright (c) 2012-2013
* Dhiru Kholia <dhiru.kholia at gmail.com> and Brian Wallace <brian.wallace9809 at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pwsafe;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pwsafe);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "pwsafe"
#define FORMAT_NAME "Password Safe"
#define FORMAT_TAG "$pwsafe$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
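// Note (descriptive, derived from the macro above): GETPOS maps byte i of
// key `index` into the interleaved SIMD buffer. Lanes are interleaved at
// 32-bit word granularity (SIMD_COEF_32 lanes wide), (3-(i&3)) flips byte
// order within each word for big-endian SHA-256 input, and the final term
// selects the 64-byte block group for keys beyond one SIMD width.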
static struct fmt_tests pwsafe_tests[] = {
{"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"},
{"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"},
{"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"},
{"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"},
{"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
int version;
unsigned int iterations;
unsigned char salt[32];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
// format: $pwsafe$*version*salt*iterations*hash
char *p;
char *ctcopy;
char *keeptr;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */
if ((p = strtokm(ctcopy, "*")) == NULL) /* version */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (strlen(p) < 64)
goto err;
if (strspn(p, HEXCHARS_lc) != 64)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* hash */
goto err;
if (strlen(p) != 64)
goto err;
if (strspn(p, HEXCHARS_lc) != 64)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */
p = strtokm(ctcopy, "*");
cs.version = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < 32; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.iterations = (unsigned int)atoi(p);
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#ifndef SIMD_COEF_32
#define rotl(x,y) ( x<<y | x>>(32-y) )
#define rotr(x,y) ( x>>y | x<<(32-y) )
#define CHOICE(x,y,z) ( z ^ (x & ( y ^ z)) )
#define MAJORITY(x,y,z) ( (x & y) | (z & (x | y)) )
#define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22))
#define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25))
#define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ (x>>3))
#define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ (x>>10))
#if ARCH_LITTLE_ENDIAN
#define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )
#else
#define bytereverse(x) (x)
#endif
static void pwsafe_sha256_iterate(unsigned int * state, unsigned int iterations)
{
unsigned int word00,word01,word02,word03,word04,word05,word06,word07;
unsigned int word08,word09,word10,word11,word12,word13,word14,word15;
unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
iterations++;
word00 = state[0];
word01 = state[1];
word02 = state[2];
word03 = state[3];
word04 = state[4];
word05 = state[5];
word06 = state[6];
word07 = state[7];
while(iterations)
{
iterations--;
temp0 = 0x6a09e667UL;
temp1 = 0xbb67ae85UL;
temp2 = 0x3c6ef372UL;
temp3 = 0xa54ff53aUL;
temp4 = 0x510e527fUL;
temp5 = 0x9b05688cUL;
temp6 = 0x1f83d9abUL;
temp7 = 0x5be0cd19UL;
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x428a2f98 + (word00);
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x71374491 + (word01);
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb5c0fbcf + (word02);
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xe9b5dba5 + (word03);
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x3956c25b + (word04);
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x59f111f1 + (word05);
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x923f82a4 + (word06);
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xab1c5ed5 + (word07);
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xd807aa98 + ( (word08 = 0x80000000U) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x12835b01 + ( (word09 = 0) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x243185be + ( (word10 = 0) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x550c7dc3 + ( (word11 = 0) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x72be5d74 + ( (word12 = 0) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x80deb1fe + ( (word13 = 0) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x9bdc06a7 + ( (word14 = 0) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc19bf174 + ( (word15 = 256) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xe49b69c1 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xefbe4786 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x0fc19dc6 + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x240ca1cc + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x2de92c6f + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4a7484aa + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5cb0a9dc + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x76f988da + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x983e5152 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa831c66d + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb00327c8 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xbf597fc7 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xc6e00bf3 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd5a79147 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x06ca6351 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x14292967 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x27b70a85 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x2e1b2138 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x4d2c6dfc + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x53380d13 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x650a7354 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x766a0abb + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x81c2c92e + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x92722c85 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xa2bfe8a1 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa81a664b + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xc24b8b70 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xc76c51a3 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xd192e819 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd6990624 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xf40e3585 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x106aa070 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x19a4c116 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x1e376c08 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x2748774c + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x34b0bcb5 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x391c0cb3 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4ed8aa4a + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5b9cca4f + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x682e6ff3 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x748f82ee + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x78a5636f + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x84c87814 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x8cc70208 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x90befffa + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xa4506ceb + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xbef9a3f7 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc67178f2 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
word00 = 0x6a09e667UL + temp0;
word01 = 0xbb67ae85UL + temp1;
word02 = 0x3c6ef372UL + temp2;
word03 = 0xa54ff53aUL + temp3;
word04 = 0x510e527fUL + temp4;
word05 = 0x9b05688cUL + temp5;
word06 = 0x1f83d9abUL + temp6;
word07 = 0x5be0cd19UL + temp7;
}
state[0] = bytereverse(word00);
state[1] = bytereverse(word01);
state[2] = bytereverse(word02);
state[3] = bytereverse(word03);
state[4] = bytereverse(word04);
state[5] = bytereverse(word05);
state[6] = bytereverse(word06);
state[7] = bytereverse(word07);
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
SHA256_CTX ctx;
#ifdef SIMD_COEF_32
unsigned int i;
unsigned char _IBuf[64*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[32];
uint32_t *keys32, j;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys32 = (uint32_t*)keys;
memset(keys, 0, 64*MAX_KEYS_PER_CRYPT);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
SHA256_Update(&ctx, cur_salt->salt, 32);
SHA256_Final(tmpBuf, &ctx);
for (j = 0; j < 32; ++j)
keys[GETPOS(j, i)] = tmpBuf[j];
keys[GETPOS(j, i)] = 0x80;
// 32 bytes of crypt data (0x100 bits).
keys[GETPOS(62, i)] = 0x01;
}
for (i = 0; i < cur_salt->iterations; i++) {
SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
}
// Last one with FLAT_OUT
SIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA256_Update(&ctx, cur_salt->salt, 32);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
#if 1
// This unrolled code only boosted speed on my quad-HT from 5016 to 5285 c/s:
// a lot of complex code for very little gain. The SIMD change gave us a 4x
// improvement with very little code change, while this pwsafe_sha256_iterate
// path only gains about 5%. The alternate loop below is kept because it makes
// it easy to dump intermediate data in flat format with dump_stuff(), which
// helped in getting the SIMD code working.
#ifdef COMMON_DIGEST_FOR_OPENSSL
pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations);
memcpy(crypt_out[index], ctx.hash, 32);
#else
pwsafe_sha256_iterate(ctx.h, cur_salt->iterations);
memcpy(crypt_out[index], ctx.h, 32);
#endif
#else
{ int i;
for (i = 0; i <= cur_salt->iterations; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, (unsigned char*)crypt_out[index], 32);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
} }
#endif
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void pwsafe_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
struct fmt_main fmt_pwsafe = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
pwsafe_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
pwsafe_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32) && !defined(SIMDE_X86_SSE_NATIVE) && defined(_MSC_VER)
#include <windows.h>
#endif
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_MIPS_MSA_NATIVE)
v16i8 msa_i8;
v8i16 msa_i16;
v4i32 msa_i32;
v2i64 msa_i64;
v16u8 msa_u8;
v8u16 msa_u16;
v4u32 msa_u32;
v2u64 msa_u64;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
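/* These values occupy bits 13-14 of the x86 MXCSR rounding-control
 * field, which is why the fallback constants are 0x0000/0x2000/0x4000/
 * 0x6000 rather than 0..3. */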
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
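/* The composite macros above pair a rounding direction with an
 * exception-signaling flag, mirroring the SSE4.1 _MM_FROUND_* encoding
 * (e.g. SIMDE_MM_FROUND_TRUNC == TO_ZERO | RAISE_EXC). */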
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
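/* Note: the portable fallbacks below cannot actually control
 * flush-to-zero behavior, so SIMDE_MM_GET_FLUSH_ZERO_MODE always
 * reports "off" and SIMDE_MM_SET_FLUSH_ZERO_MODE is a no-op when no
 * native support is available. */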
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
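/* A typical save/restore pattern (a usage sketch, not part of the API):
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ... rounding-sensitive code ...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */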
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
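/* Note: the non-native getcsr/setcsr fallbacks only model the
 * rounding-control field of MXCSR; the exception status and mask bits
 * are not emulated. */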
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
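/* Example (a sketch; `v` is any simde__m128): truncate every lane
 * toward zero without signaling precision exceptions:
 *
 *   simde__m128 t =
 *     simde_mm_round_ps(v, SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_NO_EXC);
 */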
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
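/* Note the argument order: e3 lands in the highest lane. For example,
 * simde_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f) yields f32[0] == 0.0f
 * through f32[3] == 3.0f. */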
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_broadcastlow_ps(simde__m128 a) {
/* This function broadcasts the first element in the input vector to
* all lanes. It is used to avoid generating spurious exceptions in
* *_ss functions since there may be garbage in the upper lanes. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_shuffle_ps(a, a, 0);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[0];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
/* The upper lanes of the result must be the untouched lanes of a. */
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend the lanes of two vectors
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
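/* Example (a sketch): clamp the negative lanes of v to zero.
*
*   simde__m128 zero = simde_mm_set1_ps(0.0f);
*   simde__m128 neg = simde_mm_cmplt_ps(v, zero); // all-ones where v < 0
*   v = simde_x_mm_select_ps(v, zero, neg);
*
* The cmplt result is all-zeros/all-ones per lane, so it satisfies the
* mask requirement described above. */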
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
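/* The pavg emulations below compute the rounding average
 * (a + b + 1) >> 1 in widened arithmetic so the intermediate sum
 * cannot overflow the element type. */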
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
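/* Clears the sign bit of every lane (lane-wise fabsf). The SSE path
 * materializes the 0x7FFFFFFF mask via memcpy instead of a pointer
 * cast to stay within strict-aliasing rules. */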
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
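/* In the portable fallbacks for the *_ss compares below, lane 0
 * receives the comparison mask and lanes 1-3 are copied through from
 * a, matching the native scalar-compare semantics. */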
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
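/* Note: the cmpnge/cmpngt/cmpnle/cmpnlt forms below are implemented
 * with the complementary ordered compares. Native SSE treats the "not"
 * predicates as unordered (they return all-ones when either operand is
 * NaN), so results differ for NaN inputs. */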
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON has no ordered-compare builtin.
Compare a == a and b == b to detect NaNs,
then AND the two results to get the final ordered mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
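/* Note: the comi* compares below return the lane-0 comparison as an
 * int. When either operand is NaN the native instructions report an
 * unordered result through the flags, which the plain C fallbacks do
 * not reproduce exactly. */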
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
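/* Lane-wise copysign: each result lane has the magnitude of dest and
 * the sign bit of src. */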
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
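/* XORs the sign bit of each src lane into the corresponding dest lane,
 * i.e. the sign of dest is flipped wherever src is negative. */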
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) && !defined(SIMDE_BUG_GCC_100761)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
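/* Note: when SIMDE_FAST_CONVERSION_RANGE is not defined, inputs
 * outside the int32 range (including NaN) convert to INT32_MIN,
 * mirroring the x86 "integer indefinite" result of cvtss2si. */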
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
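/* ARMv7 NEON has no float division instruction: start from a reciprocal
 * estimate (vrecpe), refine it with one Newton-Raphson step (vrecps
 * computes 2 - recip * b), then multiply, i.e. a / b ~ a * (1 / b). */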
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
a_ = simde__m64_to_private(a);
a_.i16[imm8] = i;
return simde__m64_from_private(a_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument,
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function; a caller-side
workaround is sketched below.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
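/* For example, a caller can sidestep the cast (and the warning) by going
   through a correctly-typed temporary. A hypothetical caller-side sketch
   (x is some simde__m128), not part of the API:

     simde_float32 lo[2] = { 1.0f, 2.0f };
     simde__m64 tmp;
     memcpy(&tmp, lo, sizeof(tmp));
     x = simde_mm_loadl_pi(x, &tmp);
*/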
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
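/* A byte of a is written to mem_addr only when the corresponding mask
 * byte has its most-significant bit set, which for a signed i8 is
 * exactly the "< 0" test below. */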
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_pmin(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
uint32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < b_.f32);
r_.f32 =
HEDLEY_REINTERPRET_CAST(
__typeof__(r_.f32),
( (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32) & m) |
(HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32) & ~m)
)
);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
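/* Isolate each byte's sign bit, then use vshl with the negative shift
 * counts in xr (negative counts shift right) to move byte i's sign bit
 * into bit position i; the horizontal add then combines the eight
 * disjoint single-bit values into the final mask. */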
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
static const uint32_t md[4] = {
1 << 0, 1 << 1, 1 << 2, 1 << 3
};
uint32x4_t extended = vreinterpretq_u32_s32(vshrq_n_s32(a_.neon_i32, 31));
uint32x4_t masked = vandq_u32(vld1q_u32(md), extended);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(int32_t, vaddvq_u32(masked));
#else
uint64x2_t t64 = vpaddlq_u32(masked);
return
HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 0)) +
HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 1));
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx));
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx);
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#else
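/* Portable fallback: gather each 32-bit lane's sign bit into bit i of
 * the result. */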
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
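/* Widen to a 32-bit product per lane, shift the high half down, and
 * narrow back to 16 bits. */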
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (const void* p, int i) {
#if \
HEDLEY_HAS_BUILTIN(__builtin_prefetch) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
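/* __builtin_prefetch(addr, rw, locality): rw is 0 for a read prefetch
 * and 1 for a write prefetch; locality ranges from 0 (non-temporal) up
 * to 3 (keep in all cache levels). */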
switch(i) {
case SIMDE_MM_HINT_NTA:
__builtin_prefetch(p, 0, 0);
break;
case SIMDE_MM_HINT_T0:
__builtin_prefetch(p, 0, 3);
break;
case SIMDE_MM_HINT_T1:
__builtin_prefetch(p, 0, 2);
break;
case SIMDE_MM_HINT_T2:
__builtin_prefetch(p, 0, 1);
break;
case SIMDE_MM_HINT_ENTA:
__builtin_prefetch(p, 1, 0);
break;
case SIMDE_MM_HINT_ET0:
__builtin_prefetch(p, 1, 3);
break;
case SIMDE_MM_HINT_ET1:
__builtin_prefetch(p, 1, 2);
break;
case SIMDE_MM_HINT_ET2:
__builtin_prefetch(p, 1, 1);
break;
}
#elif defined(__ARM_ACLE)
#if (__ARM_ACLE >= 101)
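/* Per ACLE, the __pldx arguments are (access kind, cache level,
 * retention policy, address): access 0 = read / 1 = write, level
 * 0 = L1 .. 2 = L3, retention 0 = temporal / 1 = streaming. */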
switch(i) {
case SIMDE_MM_HINT_NTA:
__pldx(0, 0, 1, p);
break;
case SIMDE_MM_HINT_T0:
__pldx(0, 0, 0, p);
break;
case SIMDE_MM_HINT_T1:
__pldx(0, 1, 0, p);
break;
case SIMDE_MM_HINT_T2:
__pldx(0, 2, 0, p);
break;
case SIMDE_MM_HINT_ENTA:
__pldx(1, 0, 1, p);
break;
case SIMDE_MM_HINT_ET0:
__pldx(1, 0, 0, p);
break;
case SIMDE_MM_HINT_ET1:
__pldx(1, 1, 0, p);
break;
case SIMDE_MM_HINT_ET2:
__pldx(1, 2, 0, p);
break;
}
#else
(void) i;
__pld(p);
#endif
#elif HEDLEY_PGI_VERSION_CHECK(10,0,0)
(void) i;
#pragma mem prefetch p
#elif HEDLEY_CRAY_VERSION_CHECK(8,1,0)
switch (i) {
case SIMDE_MM_HINT_NTA:
#pragma _CRI prefetch (nt) p
break;
case SIMDE_MM_HINT_T0:
case SIMDE_MM_HINT_T1:
case SIMDE_MM_HINT_T2:
#pragma _CRI prefetch p
break;
case SIMDE_MM_HINT_ENTA:
#pragma _CRI prefetch (write, nt) p
break;
case SIMDE_MM_HINT_ET0:
case SIMDE_MM_HINT_ET1:
case SIMDE_MM_HINT_ET2:
#pragma _CRI prefetch (write) p
break;
}
#elif HEDLEY_IBM_VERSION_CHECK(11,0,0)
switch(i) {
case SIMDE_MM_HINT_NTA:
__prefetch_by_load(p, 0, 0);
break;
case SIMDE_MM_HINT_T0:
__prefetch_by_load(p, 0, 3);
break;
case SIMDE_MM_HINT_T1:
__prefetch_by_load(p, 0, 2);
break;
case SIMDE_MM_HINT_T2:
__prefetch_by_load(p, 0, 1);
break;
case SIMDE_MM_HINT_ENTA:
__prefetch_by_load(p, 1, 0);
break;
case SIMDE_MM_HINT_ET0:
__prefetch_by_load(p, 1, 3);
break;
case SIMDE_MM_HINT_ET1:
__prefetch_by_load(p, 1, 2);
break;
case SIMDE_MM_HINT_ET2:
__prefetch_by_load(p, 1, 1);
break;
}
#endif
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
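/* Reciprocal estimate refined by SIMDE_ACCURACY_PREFERENCE
 * Newton-Raphson steps (vrecps computes 2 - recip * a). */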
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
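/* Bit-trick initial estimate refined with one Newton-Raphson step:
 * r' = r * (2 - r * x). */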
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
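/* Fast inverse square root: a bit-level initial guess (the magic
 * constants below are the tuned values from the reference above)
 * refined by Newton-Raphson steps of the form
 * x' = x * (1.5 - (a / 2) * x * x). */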
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
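/* Absolute differences of the eight byte lanes, then a tree of pairwise
 * widening adds (u8 -> u16 -> u32 -> u64) leaves the sum in a single
 * 64-bit lane. */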
uint64x1_t t = vpaddl_u32(vpaddl_u16(vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8))));
r_.neon_u16 = vset_lane_u16(HEDLEY_STATIC_CAST(uint16_t, vget_lane_u64(t, 0)), vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, simde_math_abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
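/* x86 sfence only orders stores; lacking a dedicated store fence, a full
   sequentially-consistent fence is the conservative portable substitute. */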
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
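/* The immediate packs four 2-bit source-lane selectors, low result lane
   first: SIMDE_MM_SHUFFLE(3, 2, 1, 0) == 0xE4, so, for example,
   simde_mm_shuffle_ps(a, b, SIMDE_MM_SHUFFLE(3, 2, 1, 0)) yields
   { a[0], a[1], b[2], b[3] }. */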
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_shuffle_ps(a, b, imm8) \
(__extension__({ \
float32x4_t simde_mm_shuffle_ps_a_ = simde__m128_to_neon_f32(a); \
float32x4_t simde_mm_shuffle_ps_b_ = simde__m128_to_neon_f32(b); \
float32x4_t simde_mm_shuffle_ps_r_; \
\
simde_mm_shuffle_ps_r_ = vmovq_n_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, (imm8) & (0x3))); \
simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_ps_r_, 1); \
simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_ps_r_, 2); \
vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_ps_r_, 3); \
}))
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
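/* ARMv7 NEON has no vector sqrt: compute sqrt(a) as a * rsqrt(a),
 * refining the reciprocal-square-root estimate with vrsqrts
 * Newton-Raphson steps according to SIMDE_ACCURACY_PREFERENCE. */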
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
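/* ucomi* are "quiet" comparisons that must not signal on quiet NaNs, so the
   floating-point environment is saved with feholdexcept() and restored
   afterwards to discard any exception flags the plain C comparison might
   raise.  The same pattern recurs in the other ucomi* fallbacks below. */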
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp0; \
SIMDE_MM_TRANSPOSE4_PS_tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
SIMDE_MM_TRANSPOSE4_PS_tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp0, SIMDE_MM_TRANSPOSE4_PS_tmp2); \
row1 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp0); \
row2 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp3); \
row3 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
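/* Usage sketch (illustration only, not part of the original header):
 * transpose a 4x4 row-major matrix held in four simde__m128 rows in place.
 *
 *   float m[16];                                // filled elsewhere
 *   simde__m128 r0 = simde_mm_loadu_ps(&m[0]);
 *   simde__m128 r1 = simde_mm_loadu_ps(&m[4]);
 *   simde__m128 r2 = simde_mm_loadu_ps(&m[8]);
 *   simde__m128 r3 = simde_mm_loadu_ps(&m[12]);
 *   SIMDE_MM_TRANSPOSE4_PS(r0, r1, r2, r3);     // rows now hold the columns
 *   simde_mm_storeu_ps(&m[0], r0);              // ... and so on for r1..r3
 */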
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
GeometryConverter.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <map>
#include <sstream>
#include <unordered_set>
#include <vector>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "IncludeCarveHeaders.h"
#include "GeometryInputData.h"
#include "RepresentationConverter.h"
#include "CSG_Adapter.h"
class GeometryConverter : public StatusCallback
{
protected:
shared_ptr<BuildingModel> m_ifc_model;
shared_ptr<GeometrySettings> m_geom_settings;
shared_ptr<RepresentationConverter> m_representation_converter;
std::map<int, shared_ptr<ProductShapeData> > m_product_shape_data;
std::map<int, shared_ptr<BuildingObject> > m_map_outside_spatial_structure;
double m_recent_progress = 0;
double m_csg_eps = 1.5e-05;
std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;
#ifdef ENABLE_OPENMP
Mutex m_writelock_messages;
#endif
public:
// getters and setters
shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; }
shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
std::map<int, shared_ptr<ProductShapeData> >& getShapeInputData() { return m_product_shape_data; }
std::map<int, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }
GeometryConverter( shared_ptr<BuildingModel>& ifc_model )
{
m_ifc_model = ifc_model;
m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
resetNumVerticesPerCircle();
shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) );
// redirect all messages to this->messageTarget
m_ifc_model->setMessageTarget( this );
m_representation_converter->setMessageTarget( this );
}
virtual ~GeometryConverter() {}
void resetModel()
{
progressTextCallback( L"Unloading model, cleaning up memory..." );
clearInputCache();
m_recent_progress = 0.0;
m_ifc_model->clearCache();
m_ifc_model->clearIfcModel();
progressTextCallback( L"Unloading model done" );
progressValueCallback( 0.0, "parse" );
#ifdef _DEBUG
GeomDebugDump::clearMeshsetDump();
#endif
}
void clearInputCache()
{
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
m_messages.clear();
}
void resetNumVerticesPerCircle()
{
m_geom_settings->resetNumVerticesPerCircle();
}
void setCsgEps(double eps)
{
m_csg_eps = eps;
}
void setModel( shared_ptr<BuildingModel> model )
{
if( m_ifc_model )
{
m_ifc_model->unsetMessageCallBack();
}
clearInputCache();
m_ifc_model = model;
m_representation_converter->clearCache();
m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
m_ifc_model->setMessageTarget( this );
}
void resolveProjectStructure( shared_ptr<ProductShapeData>& product_data )
{
if( !product_data )
{
return;
}
if( product_data->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def( product_data->m_ifc_object_definition );
const int entity_id = ifc_object_def->m_entity_id;
product_data->m_added_to_spatial_structure = true;
const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_object_def->m_IsDecomposedBy_inverse;
for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
{
const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
if( rel_aggregates_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
if( rel_aggregates )
{
const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
{
const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
if( related_obj_def )
{
auto it_product_map = m_product_shape_data.find( related_obj_def->m_entity_id );
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_object_def);
if( spatial_ele )
{
const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
for( size_t ii = 0; ii < vec_contains.size(); ++ii )
{
const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
if( rel_contained_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
if( rel_contained )
{
const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
{
const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
if( related_product )
{
auto it_product_map = m_product_shape_data.find( related_product->m_entity_id );
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
}
// TODO: handle IfcRelAssignsToProduct
}
void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeData>& product_shape )
{
if( !prop_set )
{
return;
}
for( auto& ifc_property : prop_set->m_HasProperties )
{
if( !ifc_property )
{
continue;
}
shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
if( simple_property )
{
// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
std::wstring name_str = property_name->m_value;
if( name_str.compare( L"LayerName" ) == 0 )
{
// TODO: implement layers
}
shared_ptr<IfcText> description = simple_property->m_Description;
shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
if( property_single_value )
{
//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
}
continue;
}
shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
if( complex_property )
{
if( !complex_property->m_UsageName ) continue;
if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
{
vec4 vec_color;
m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
if( !appearance_data )
{
throw OutOfMemoryException( __FUNC__ );
}
appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
appearance_data->m_color_ambient.setColor( vec_color );
appearance_data->m_color_diffuse.setColor( vec_color );
appearance_data->m_color_specular.setColor( vec_color );
appearance_data->m_shininess = 35.f;
product_shape->addAppearance( appearance_data );
}
}
}
}
/** \brief convertGeometry: Creates Carve geometry from the previously loaded BuildingModel. */
void convertGeometry()
{
progressTextCallback( L"Creating geometry..." );
progressValueCallback( 0, "geometry" );
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
if( !m_ifc_model )
{
return;
}
shared_ptr<ProductShapeData> ifc_project_data;
std::vector<shared_ptr<IfcObjectDefinition> > vec_object_defs;
double length_to_meter_factor = 1.0;
if( m_ifc_model->getUnitConverter() )
{
length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
carve::setEpsilon( m_csg_eps );
const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
for( auto it = map_entities.begin(); it != map_entities.end(); ++it )
{
shared_ptr<BuildingEntity> obj = it->second;
shared_ptr<IfcObjectDefinition> object_def = dynamic_pointer_cast<IfcObjectDefinition>(obj);
if( object_def )
{
vec_object_defs.push_back( object_def );
}
}
// create geometry for each IfcProduct independently; the spatial structure will be resolved later
std::map<int, shared_ptr<ProductShapeData> >* map_products_ptr = &m_product_shape_data;
const int num_products = (int)vec_object_defs.size();
#ifdef ENABLE_OPENMP
Mutex writelock_map;
Mutex writelock_ifc_project;
#pragma omp parallel firstprivate(num_products) shared(map_products_ptr)
{
// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
for( int i = 0; i < num_products; ++i )
{
shared_ptr<IfcObjectDefinition> ifc_object_def = vec_object_defs[i];
const int entity_id = ifc_object_def->m_entity_id;
shared_ptr<ProductShapeData> product_geom_input_data( new ProductShapeData( entity_id ) );
product_geom_input_data->m_ifc_object_definition = ifc_object_def;
std::stringstream thread_err;
if( !m_geom_settings->getRenderObjectFilter()(ifc_object_def) )
{
// geometry will be created in method subtractOpenings
continue;
}
else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) )
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_ifc_project );
#endif
ifc_project_data = product_geom_input_data;
}
try
{
convertIfcProductShape( product_geom_input_data );
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
thread_err << e.what();
}
catch( carve::exception& e )
{
thread_err << e.str();
}
catch( std::exception& e )
{
thread_err << e.what();
}
catch( ... )
{
thread_err << "undefined error, product id " << entity_id;
}
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_map );
#endif
map_products_ptr->insert( std::make_pair( entity_id, product_geom_input_data ) );
if( thread_err.tellp() > 0 )
{
messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
}
// progress callback
double progress = (double)i / (double)num_products;
if( progress - m_recent_progress > 0.02 )
{
#ifdef ENABLE_OPENMP
if( omp_get_thread_num() == 0 )
#endif
{
// leave 10% of progress to openscenegraph internals
progressValueCallback( progress*0.9, "geometry" );
m_recent_progress = progress;
}
}
}
#ifdef ENABLE_OPENMP
} // implicit barrier
#endif
// subtract openings in related objects, such as IFCBUILDINGELEMENTPART connected to a window through IFCRELAGGREGATES
for( auto it = map_products_ptr->begin(); it != map_products_ptr->end(); ++it )
{
shared_ptr<ProductShapeData> product_geom_input_data = it->second;
try
{
subtractOpeningsInRelatedObjects(product_geom_input_data);
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( carve::exception& e )
{
messageCallback(e.str(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( std::exception& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( ... )
{
messageCallback("undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__);
}
}
try
{
// now resolve spatial structure
if( ifc_project_data )
{
resolveProjectStructure( ifc_project_data );
}
// check if there are entities that are not in spatial structure
for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
{
shared_ptr<ProductShapeData> product_shape = it_product_shapes->second;
if( !product_shape )
{
continue;
}
if( !product_shape->m_added_to_spatial_structure )
{
if( !product_shape->m_ifc_object_definition.expired() )
{
shared_ptr<IfcObjectDefinition> ifc_product( product_shape->m_ifc_object_definition );
shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_product);
if( !m_geom_settings->getRenderObjectFilter()(ifc_product) )
{
continue;
}
m_map_outside_spatial_structure[ifc_product->m_entity_id] = ifc_product;
}
}
}
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( ... )
{
messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
m_representation_converter->getProfileCache()->clearProfileCache();
progressTextCallback( L"Loading file done" );
progressValueCallback( 1.0, "geometry" );
}
//\brief method convertIfcProductShape: Creates geometry objects (meshsets with a connected vertex-edge-face graph) from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
void convertIfcProductShape( shared_ptr<ProductShapeData>& product_shape )
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if( !ifc_product )
{
return;
}
if( !ifc_product->m_Representation )
{
return;
}
double length_factor = 1.0;
if( m_ifc_model )
{
if( m_ifc_model->getUnitConverter() )
{
length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
}
// evaluate IFC geometry
shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
{
const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
if( !representation )
{
continue;
}
try
{
shared_ptr<RepresentationData> representation_data( new RepresentationData() );
m_representation_converter->convertIfcRepresentation( representation, representation_data );
product_shape->m_vec_representations.push_back( representation_data );
representation_data->m_parent_product = product_shape;
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
}
// IfcProduct has an ObjectPlacement that can be local or global
product_shape->m_object_placement = ifc_product->m_ObjectPlacement;
if( ifc_product->m_ObjectPlacement )
{
// IfcPlacement2Matrix follows related placements in case of local coordinate systems
std::unordered_set<IfcObjectPlacement*> placement_already_applied;
m_representation_converter->getPlacementConverter()->convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, product_shape, placement_already_applied, false );
}
// handle openings
std::vector<shared_ptr<ProductShapeData> > vec_opening_data;
const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( ifc_element )
{
m_representation_converter->subtractOpenings(ifc_element, product_shape);
}
// Fetch the IFCProduct relationships
if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
{
std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
{
shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
if( relating_property_definition_select )
{
// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
if( property_set_def )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
continue;
}
shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
if( property_set_def_set )
{
std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_property_set_def = property_set_def_set->m_vec;
std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
for( it_property_set_def = vec_property_set_def.begin(); it_property_set_def != vec_property_set_def.end(); ++it_property_set_def )
{
shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
if( property_set_def2 )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
}
}
continue;
}
}
}
}
}
void subtractOpeningsInRelatedObjects(shared_ptr<ProductShapeData>& product_shape)
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_object_def);
if( !ifc_element )
{
return;
}
if( ifc_element->m_HasOpenings_inverse.size() == 0 )
{
return;
}
// collect aggregated objects
const std::vector<weak_ptr<IfcRelAggregates> >& vec_decomposed_by = ifc_element->m_IsDecomposedBy_inverse;
for( auto& decomposed_by : vec_decomposed_by )
{
if( decomposed_by.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> decomposed_by_aggregates(decomposed_by);
std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = decomposed_by_aggregates->m_RelatedObjects;
for( auto& related_object : vec_related_objects )
{
if( !related_object )
{
continue;
}
if( related_object->m_entity_id >= 0 )
{
auto it_find_related_shape = m_product_shape_data.find(related_object->m_entity_id);
if( it_find_related_shape != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_find_related_shape->second;
m_representation_converter->subtractOpenings(ifc_element, related_product_shape);
}
}
}
}
}
virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
{
GeometryConverter* myself = (GeometryConverter*)ptr;
if( myself )
{
if( m->m_entity )
{
#ifdef ENABLE_OPENMP
ScopedLock lock( myself->m_writelock_messages );
#endif
// make sure that the same message for one entity does not appear several times
const int entity_id = m->m_entity->m_entity_id;
auto it = myself->m_messages.find( entity_id );
if( it != myself->m_messages.end() )
{
std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
{
shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
{
// same message for same entity is already there, so ignore message
return;
}
}
vec_message_for_entity.push_back( m );
}
else
{
std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
vec.push_back( m );
}
}
myself->messageCallback( m );
}
}
};
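/* Usage sketch (illustration only, not part of the original header; how the
   BuildingModel gets populated, e.g. via a STEP/IFC reader, is an assumption):

       shared_ptr<BuildingModel> model( new BuildingModel() );
       // ... load IFC content into the model ...
       GeometryConverter converter( model );
       converter.convertGeometry();
       // resolved shapes, keyed by IFC entity id:
       std::map<int, shared_ptr<ProductShapeData> >& shapes = converter.getShapeInputData();
*/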
|
simd-clones-2.c | /* { dg-options "-fopenmp -fdump-tree-optimized -O" } */
#pragma omp declare simd inbranch uniform(c) linear(b:66)
#pragma omp declare simd notinbranch aligned(c:32)
int addit(int a, int b, int *c)
{
return a + b;
}
#pragma omp declare simd uniform(a) aligned(a:32) linear(k:1) notinbranch
float setArray(float *a, float x, int k)
{
a[k] = a[k] + x;
return a[k];
}
/* { dg-final { scan-tree-dump "_ZGVbN4ua32vl_setArray" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVbN4vvva32_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVbM4vl66u_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVcN8ua32vl_setArray" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVcN4vvva32_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVcM4vl66u_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVdN8ua32vl_setArray" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVdN8vvva32_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVdM8vl66u_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVeN16ua32vl_setArray" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVeN16vvva32_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
/* { dg-final { scan-tree-dump "_ZGVeM16vl66u_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
|
EDT.h | #ifndef EDT_INCLUDED
#define EDT_INCLUDED
#include <omp.h>
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>
#include "SignalProcessing/CubeGrid.h"
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads=1 );
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads=1 );
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff=Real(sqrt(8.)) , int threads=1 );
///////////////////////////////
// Rasterization definitions //
///////////////////////////////
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads )
{
int res = rasterization.resolution();
CubeGrid< int > _edt;
SquaredEDT( rasterization , _edt , threads );
edt.resize( res );
const int* _edtPtr = _edt[0];
Real *edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = Real( _edtPtr[i] );
}
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff , int threads )
{
int res = rasterization.resolution();
SquaredEDT( rasterization , gedt , threads );
Real* _gedt = gedt[0];
fallOff = Real(2.)*fallOff*fallOff;
#pragma omp parallel for num_threads( threads )
for( int i=0 ; i<res*res*res; i++ ) _gedt[i] = Real( exp( - _gedt[i] / fallOff) );
}
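// Note (illustration): GaussianEDT maps each squared distance d^2 to
// exp( -d^2 / (2*fallOff^2) ), i.e. a Gaussian of standard deviation fallOff
// centered on the surface: 1 on rasterized voxels, decaying with distance.
// With the default fallOff = sqrt(8), a voxel at distance 4 gets
// exp(-16/16) = 1/e.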
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads )
{
threads = std::max< int >( threads , 1 );
int res = rasterization.resolution();
if( !res )
{
fprintf( stderr , "[WARNING] Cannot compute distance transform of zero resolution rasterization\n" );
return;
}
edt.resize( rasterization.resolution() );
std::vector< int* > oldBuffer( threads ) , newBuffer( threads );
for( int i=0 ; i<threads ; i++ ) oldBuffer[i] = new int[res] , newBuffer[i] = new int[res];
// Set the upper bound on the distance values
{
int* edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = 3 * (res+1) * (res+1);
}
// scan along z axis
#pragma omp parallel for num_threads( threads )
for( int xy=0 ; xy<res*res ; xy++ )
{
int x = xy/res , y = xy%res;
bool first=true;
int dist = 0;
int* edtPtr = edt[x] + y*res;
const char* rasterizationPtr = rasterization[x] + y*res;
for( int z=0 ; z<res ; z++ )
{
if( rasterizationPtr[z] )
{
dist = 0;
first = false;
edtPtr[z] = 0;
}
else if( !first )
{
dist++;
edtPtr[z] = dist*dist;
}
}
// backward scan
dist = 0;
first = true;
for( int z=(res-1) ; z>=0 ; z-- )
{
if( rasterizationPtr[z] )
{
dist = 0;
first = false;
edtPtr[z] = 0;
}
else if( !first )
{
dist++;
int square = dist*dist;
if( square<edtPtr[z] ) edtPtr[z] = square;
}
}
}
// scan along y axis
#pragma omp parallel for num_threads( threads )
for( int thread=0 ; thread<threads ; thread++ )
{
int *_oldBuffer=oldBuffer[thread] , *_newBuffer=newBuffer[thread];
for( int xz=(res*res*thread)/threads ; xz<(res*res*(thread+1))/threads ; xz++ )
{
int x = xz/res , z = xz%res;
// forward scan
int s=0;
int* edtPtr = edt[x] + z;
for( int y=0 ; y<res; y++ )
{
_oldBuffer[y] = edtPtr[y*res];
int dist = _oldBuffer[y];
bool foundCloser=false;
if( dist )
{
for( int t=s ; t<=y ; t++ )
{
int new_dist = _oldBuffer[t] + (y - t) * (y - t);
if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
}
}
if( !foundCloser ) s=y;
_newBuffer[y] = dist;
}
// backward scan
s=res-1;
for( int y=res-1 ; y>=0 ; y-- )
{
int dist = _newBuffer[y];
bool foundCloser = false;
if( dist )
{
for( int t=s ; t>y ; t-- )
{
int new_dist = _oldBuffer[t] + (y - t) * (y - t);
if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
}
edtPtr[y*res] = dist;
}
if( !foundCloser ) s=y;
}
}
}
// scan along x axis
#pragma omp parallel for num_threads( threads )
for( int thread=0 ; thread<threads ; thread++ )
{
int *_oldBuffer = oldBuffer[thread] , *_newBuffer = newBuffer[thread];
for( int yz=(res*res*thread)/threads ; yz<(res*res*(thread+1))/threads ; yz++ )
{
int y = yz/res , z=yz%res;
// forward scan
int s=0;
int* edtPtr = edt[0] + y*res+z;
for( int x=0 ; x<res ; x++ )
{
int dist = _oldBuffer[x] = edtPtr[x*res*res];
// If the calculated distance to this point is not zero,
// start from s and see if you can find something closer.
bool foundCloser = false;
if( dist )
{
for( int t=s ; t<=x ; t++ )
{
// Compute the squared distance that would be obtained if we used the (squared) orthogonal distance to t
// plus the (squared) parallel distance from t to x
int new_dist = _oldBuffer[t] + (x - t) * (x - t); // <=> new_dist = _oldBuffer[t] + x*x - 2*t*x + t*t
// If s has not been updated then: _oldBuffer[t] + (x - t) * (x - t) > _oldBuffer[x] for all t <= x
// Taking y = x+d (w/ d>0) we get:
// _oldBuffer[t] + ( y - t ) * ( y - t ) = _oldBuffer[t] + ( x - t + d ) * ( x - t + d )
// = _oldBuffer[t] + ( x - t ) * ( x - t ) + 2 * d * ( x - t ) + d * d
// > _oldBuffer[x] + ( y - x ) * ( y - x ) + 2 * d * ( x - t )
// > _oldBuffer[x] + ( y - x ) * ( y - x )
// for all t <= x
// That is, the squared distance through t <= x has to be at least as large as the squared distance through x
if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
}
}
if( !foundCloser ) s=x;
_newBuffer[x] = dist;
}
// backwards scan
s = res-1;
for( int x=res-1 ; x>=0 ; x-- )
{
int dist = _newBuffer[x];
bool foundCloser = false;
if( dist )
{
for( int t=s; t>=x ; t-- )
{
int new_dist = _oldBuffer[t] + (x - t) * (x - t);
if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
}
edtPtr[x*res*res] = dist;
}
if( !foundCloser ) s=x;
}
}
}
for( int i=0 ; i<threads ; i++ )
{
delete[] oldBuffer[i];
delete[] newBuffer[i];
}
}
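// Usage sketch (illustration only; the guard macro is hypothetical and off by
// default, so the header compiles unchanged):
#if defined(EDT_USAGE_EXAMPLE)
template< class Real >
void ExampleEDTUsage( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt )
{
	// gedt[v] = exp( -dist(v)^2 / 16 ) with the default fall-off of sqrt(8)
	GaussianEDT( rasterization , gedt , Real(sqrt(8.)) , omp_get_max_threads() );
}
#endif // EDT_USAGE_EXAMPLE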
#endif // EDT_INCLUDED |
ark_analytic_nonlin_ompdev.c | /*-----------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
*---------------------------------------------------------------
* This code is based on the serial code found in
* ark_analytic_nonlin.c developed by Daniel R. Reynolds
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with analytical
* solution,
* dy/dt = (t+1)*exp(-y)
* for t in the interval [0.0, 10.0], with initial condition: y=0.
* This has analytical solution
* y(t) = log(0.5*t^2 + t + 1)
*
* This program solves the problem with the ERK method.
* Output is printed every 1.0 units of time (10 total).
* Run statistics (optional outputs) are printed at the end.
*-----------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <math.h>
#include <arkode/arkode_erkstep.h> /* prototypes for ERKStep fcts., consts */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#include <sundials/sundials_math.h> /* def. of SUNRsqrt, etc. */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
int main()
{
/* general problem parameters */
realtype T0 = RCONST(0.0); /* initial time */
realtype Tf = RCONST(10.0); /* final time */
realtype dTout = RCONST(1.0); /* time between outputs */
sunindextype NEQ = 1; /* number of dependent vars. */
realtype reltol = 1.0e-6; /* tolerances */
realtype abstol = 1.0e-10;
/* general problem variables */
int flag; /* reusable error-checking flag */
N_Vector y = NULL; /* empty vector for storing solution */
void *arkode_mem = NULL; /* empty ARKode memory structure */
FILE *UFID;
realtype t, tout;
long int nst, nst_a, nfe, netf;
realtype *y_data = NULL;
/* Initial problem output */
printf("\nAnalytical ODE test problem:\n");
printf(" reltol = %.1"ESYM"\n", reltol);
printf(" abstol = %.1"ESYM"\n\n",abstol);
/* Initialize data structures */
y = N_VNew_OpenMPDEV(NEQ); /* Create OpenMPDEV vector for solution */
if (check_flag((void *)y, "N_VNew_OpenMPDEV", 0)) return 1;
y_data = N_VGetHostArrayPointer_OpenMPDEV(y);
y_data[0] = 0.0; /* Specify initial condition */
N_VCopyToDevice_OpenMPDEV(y); /* Copy to device */
arkode_mem = ERKStepCreate(f, T0, y); /* Create the solver memory */
if (check_flag((void *)arkode_mem, "ERKStepCreate", 0)) return 1;
/* Specify tolerances */
flag = ERKStepSStolerances(arkode_mem, reltol, abstol);
if (check_flag(&flag, "ERKStepSStolerances", 1)) return 1;
/* Open output stream for results, output comment line */
UFID = fopen("solution.txt","w");
fprintf(UFID,"# t u\n");
/* output initial condition to disk */
N_VCopyFromDevice_OpenMPDEV(y);
fprintf(UFID," %.16"ESYM" %.16"ESYM"\n", T0, y_data[0]);
/* Main time-stepping loop: calls ERKStep to perform the integration, then
prints results. Stops when the final time has been reached */
t = T0;
tout = T0+dTout;
printf(" t u\n");
printf(" ---------------------\n");
while (Tf - t > 1.0e-15) {
flag = ERKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
if (check_flag(&flag, "ERKStep", 1)) break;
N_VCopyFromDevice_OpenMPDEV(y);
printf(" %10.6"FSYM" %10.6"FSYM"\n", t, y_data[0]); /* access/print solution */
fprintf(UFID," %.16"ESYM" %.16"ESYM"\n", t, y_data[0]);
if (flag >= 0) { /* successful solve: update time */
tout += dTout;
tout = (tout > Tf) ? Tf : tout;
} else { /* unsuccessful solve: break */
fprintf(stderr,"Solver failure, stopping integration\n");
break;
}
}
printf(" ---------------------\n");
fclose(UFID);
/* Print some final statistics */
flag = ERKStepGetNumSteps(arkode_mem, &nst);
check_flag(&flag, "ERKStepGetNumSteps", 1);
flag = ERKStepGetNumStepAttempts(arkode_mem, &nst_a);
check_flag(&flag, "ERKStepGetNumStepAttempts", 1);
flag = ERKStepGetNumRhsEvals(arkode_mem, &nfe);
check_flag(&flag, "ERKStepGetNumRhsEvals", 1);
flag = ERKStepGetNumErrTestFails(arkode_mem, &netf);
check_flag(&flag, "ERKStepGetNumErrTestFails", 1);
printf("\nFinal Solver Statistics:\n");
printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
printf(" Total RHS evals = %li\n", nfe);
printf(" Total number of error test failures = %li\n\n", netf);
/* Clean up and return with successful completion */
N_VDestroy(y); /* Free y vector */
ERKStepFree(&arkode_mem); /* Free integrator memory */
return 0;
}
/*-------------------------------
* Functions called by the solver
*-------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
int dev;
realtype *y_data = N_VGetDeviceArrayPointer_OpenMPDEV(y);
realtype *ydot_data = N_VGetDeviceArrayPointer_OpenMPDEV(ydot);
dev = omp_get_default_device();
#pragma omp target map(to:t) is_device_ptr(y_data, ydot_data) device(dev)
{
ydot_data[0] = (t+1.0)*SUNRexp(-1.0 * y_data[0]);
}
return 0;
}
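/* Sanity check (illustration): substituting the analytical solution
   y(t) = log(0.5*t^2 + t + 1) gives exp(-y) = 1/(0.5*t^2 + t + 1), so
     dy/dt = (t + 1)/(0.5*t^2 + t + 1) = (t + 1)*exp(-y),
   which is exactly the RHS computed above, and y(0) = log(1) = 0 matches
   the initial condition set in main(). */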
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return 1; }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
return 0;
}
/*---- end of file ----*/
|
4361.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
{
#pragma omp target teams distribute thread_limit(128) dist_schedule(static, 28)
for (i = 0; i < _PB_NY; i++)
{
y[i] = 0;
}
#pragma omp target teams distribute thread_limit(128) dist_schedule(static, 28)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}
#pragma endscop
}
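/* Note (illustration): the kernel computes y = A^T * (A * x).  The first
   inner j-loop forms tmp[i] = (A*x)[i]; the second accumulates
   y[j] += A[i][j] * tmp[i], which over all i is exactly y = A^T * tmp.
   Distributing the i-loop makes those y[j] updates concurrent across teams,
   a known hazard in this generated benchmark variant. */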
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
mysql_fmt_plug.c | /* MYSQL_half_fmt.c
*
* Copyright (c) 2008 by <earthquake at rycon.hu>
*
* John the ripper MYSQL-fast module
*
*
 * Note: only the first 8 bytes of a MySQL hash are significant on their
 * own; the remaining bytes depend on the first 8. Passwords longer than
 * about 9-10 characters may collide in the first 8 bytes, so we have to
 * check the full hash.
 *
 * Unbelievably good optimization by Péter Kasza
*
* http://rycon.hu/
*
* OpenMP support and other assorted hacks by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MYSQL_fast;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MYSQL_fast);
#else
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 81920
#endif
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "mysql"
#define FORMAT_NAME "MySQL pre-4.1"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 16
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 8
static struct fmt_tests tests[] = {
// ciphertext, plaintext
{"445ff82636a7ba59", "probe"},
{"60671c896665c3fa", "a"},
{"1acbed4a27b20da3", "hash"},
{"77ff75006118bab8", "hacker"},
{"1b38cd9c2f809809", "hacktivity2008"},
{"1b38cd9c2f809809", "hacktivity 2008"},
{"6fc81597422015a8", "johnmodule"},
{"30f098972cc8924d", "http://guh.nu"},
{"3fc56f6037218993", "Andrew Hintz"},
{"697a7de87c5390b2", "drew"},
{"1eb71cf460712b3e", "http://4tphi.net"},
{"28ff8d49159ffbaf", "http://violating.us"},
{"5d2e19393cc5ef67", "password"},
{"5030573512345671", ""},
{"723d80f65bf9d670", "UPPERCASE"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / 4];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
}
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
static int valid(char* ciphertext, struct fmt_main *self)
{
unsigned int i;
if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
return 0;
for (i = 0; i < CIPHERTEXT_LENGTH; i++)
if (atoi16[ARCH_INDEX(ciphertext[i])] > 15)
return 0;
return 1;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
memcpy(out, ciphertext, CIPHERTEXT_LENGTH);
out[CIPHERTEXT_LENGTH] = 0;
strlwr(out);
return out;
}
static void *get_binary_size(char *ciphertext, int size)
{
/* maybe bigger than BINARY_SIZE for use from cmp_exact() */
static uint32_t buff_[8];
unsigned char *buff = (unsigned char *)buff_;
unsigned int i;
for (i = 0; i < size; i++) {
#if ARCH_LITTLE_ENDIAN
buff[(i & ~3U) | (3 - (i & 3))] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#else
buff[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#endif
}
return buff;
}
static void *get_binary(char *ciphertext)
{
return get_binary_size(ciphertext, BINARY_SIZE);
}
static void set_key(char* key, int index)
{
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
static char* get_key(int index)
{
return saved_key[index];
}
static int cmp_one(void* binary, int index)
{
return *(uint32_t *)binary == crypt_key[index][0];
}
static int cmp_all(void* binary, int count)
{
int i;
#ifdef _OPENMP
int retval = 0;
#pragma omp parallel for default(none) private(i) shared(count, binary, crypt_key, retval)
for (i = 0; i < count; i++)
if (*(uint32_t *)binary == crypt_key[i][0])
#pragma omp atomic
retval |= 1;
return retval;
#else
for (i = 0; i < count; i++)
if (*(uint32_t *)binary == crypt_key[i][0])
return 1;
return 0;
#endif
}
static int cmp_exact(char* source, int index)
{
register uint32_t nr = 1345345333, add = 7, nr2 = 0x12345671;
register uint32_t tmp;
unsigned char *p;
p = (unsigned char *)saved_key[index];
for (; *p; p++) {
if (*p == ' ' || *p == '\t')
continue;
tmp = (uint32_t)*p;
nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
nr2 += (nr2 << 8) ^ nr;
add += tmp;
}
#if 0
{
char ctmp[CIPHERTEXT_LENGTH + 1];
sprintf(ctmp, "%08x%08x", nr & (((uint32_t)1 << 31) - 1), nr2 & (((uint32_t)1 << 31) - 1));
return !memcmp(source, ctmp, CIPHERTEXT_LENGTH);
}
#else
{
uint32_t *binary = get_binary_size(source, 8);
return
binary[0] == (nr & (((uint32_t)1 << 31) - 1)) &&
binary[1] == (nr2 & (((uint32_t)1 << 31) - 1));
}
#endif
}
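/* Self-contained sketch (not part of this format) of the full pre-4.1
   MySQL hash that cmp_exact() re-derives above; mysql_old_hash is a
   hypothetical helper name, and the block is disabled so it cannot
   affect the plugin. */
#if 0
static void mysql_old_hash(const char *password, char out[17])
{
	uint32_t nr = 1345345333, add = 7, nr2 = 0x12345671, tmp;
	for (; *password; password++) {
		if (*password == ' ' || *password == '\t')
			continue; /* spaces and tabs are skipped */
		tmp = (uint32_t)(unsigned char)*password;
		nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
		nr2 += (nr2 << 8) ^ nr;
		add += tmp;
	}
	sprintf(out, "%08x%08x",
	        (unsigned)(nr & (((uint32_t)1 << 31) - 1)),
	        (unsigned)(nr2 & (((uint32_t)1 << 31) - 1)));
	/* e.g. "probe" -> "445ff82636a7ba59", matching tests[] above */
}
#endif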
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i = 0;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key)
#endif
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
for (i = 0; i < count; i++)
#endif
{
unsigned char *p = (unsigned char *)saved_key[i];
if (*p) {
uint32_t nr, add;
uint32_t tmp;
while (*p == ' ' || *p == '\t')
p++;
tmp = (uint32_t) (unsigned char) *p++;
nr = 1345345333 ^ ((((1345345333 & 63) + 7) * tmp) + (1345345333U << 8));
add = 7 + tmp;
for (; *p; p++) {
if (*p == ' ' || *p == '\t')
continue;
tmp = (uint32_t) (unsigned char) *p;
nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
add += tmp;
}
crypt_key[i][0] = (nr & (((uint32_t)1 << 31) - 1));
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
continue;
#else
return count;
#endif
}
crypt_key[i][0] = (1345345333 & (((uint32_t)1 << 31) - 1));
}
return count;
}
static int get_hash_0(int index)
{
return crypt_key[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_key[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_key[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_key[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_key[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_key[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_key[index][0] & PH_MASK_6;
}
struct fmt_main fmt_MYSQL_fast =
{
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
hist_util.h | /*!
* Copyright 2017 by Contributors
* \file hist_util.h
* \brief Utility for fast histogram aggregation
* \author Philip Cho, Tianqi Chen
*/
#ifndef XGBOOST_COMMON_HIST_UTIL_H_
#define XGBOOST_COMMON_HIST_UTIL_H_
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <limits>
#include <vector>
#include <algorithm>
#include <memory>
#include <utility>
#include "row_set.h"
#include "../tree/param.h"
#include "./quantile.h"
#include "./timer.h"
#include "random.h"
namespace xgboost {
/*!
 * \brief A C-style array with in-stack allocation. As long as the array is no larger
 *        than MaxStackSize, it is allocated on the stack. Otherwise, it is
 *        heap-allocated.
*/
template<typename T, size_t MaxStackSize>
class MemStackAllocator {
public:
explicit MemStackAllocator(size_t required_size): required_size_(required_size) {
}
T* Get() {
if (!ptr_) {
if (MaxStackSize >= required_size_) {
ptr_ = stack_mem_;
} else {
ptr_ = reinterpret_cast<T*>(malloc(required_size_ * sizeof(T)));
do_free_ = true;
}
}
return ptr_;
}
~MemStackAllocator() {
if (do_free_) free(ptr_);
}
private:
T* ptr_ = nullptr;
bool do_free_ = false;
size_t required_size_;
T stack_mem_[MaxStackSize];
};
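// Illustrative usage (not from the original header): a scratch buffer that
// avoids heap traffic for small sizes, assuming a caller such as
//   MemStackAllocator<double, 128> scratch(n);
//   double* buf = scratch.Get();   // stack storage iff n <= 128
// The heap copy, if any, is released when `scratch` goes out of scope.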
namespace common {
/*
 * \brief A thin wrapper around a dynamically allocated C-style array.
* Make sure to call resize() before use.
*/
template<typename T>
struct SimpleArray {
~SimpleArray() {
free(ptr_);
ptr_ = nullptr;
}
void resize(size_t n) {
T* ptr = static_cast<T*>(malloc(n * sizeof(T)));
// Copy only the part that fits the new buffer; blindly copying n_
// elements would overrun it when shrinking.
memcpy(ptr, ptr_, std::min(n, n_) * sizeof(T));
free(ptr_);
ptr_ = ptr;
n_ = n;
}
T& operator[](size_t idx) {
return ptr_[idx];
}
T& operator[](size_t idx) const {
return ptr_[idx];
}
size_t size() const {
return n_;
}
T back() const {
return ptr_[n_-1];
}
T* data() {
return ptr_;
}
const T* data() const {
return ptr_;
}
T* begin() {
return ptr_;
}
const T* begin() const {
return ptr_;
}
T* end() {
return ptr_ + n_;
}
const T* end() const {
return ptr_ + n_;
}
private:
T* ptr_ = nullptr;
size_t n_ = 0;
};
/*!
* \brief A single row in global histogram index.
* Directly represent the global index in the histogram entry.
*/
using GHistIndexRow = Span<uint32_t const>;
// A CSC matrix representing histogram cuts, used in CPU quantile hist.
class HistogramCuts {
// Using friends to avoid creating a virtual class, since HistogramCuts is used as value
// object in many places.
friend class SparseCuts;
friend class DenseCuts;
friend class CutsBuilder;
protected:
using BinIdx = uint32_t;
common::Monitor monitor_;
std::vector<bst_float> cut_values_;
std::vector<uint32_t> cut_ptrs_;
std::vector<float> min_vals_; // storing minimum value in a sketch set.
public:
HistogramCuts();
HistogramCuts(HistogramCuts const& that) = delete;
HistogramCuts(HistogramCuts&& that) noexcept(true) {
*this = std::forward<HistogramCuts&&>(that);
}
HistogramCuts& operator=(HistogramCuts const& that) = delete;
HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) {
monitor_ = std::move(that.monitor_);
cut_ptrs_ = std::move(that.cut_ptrs_);
cut_values_ = std::move(that.cut_values_);
min_vals_ = std::move(that.min_vals_);
return *this;
}
/* \brief Build histogram cuts. */
void Build(DMatrix* dmat, uint32_t const max_num_bins);
/* \brief How many bins a feature has. */
uint32_t FeatureBins(uint32_t feature) const {
return cut_ptrs_.at(feature+1) - cut_ptrs_[feature];
}
// Getters. Cuts should be of no use after building histogram indices, but currently
// it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve
// these for now.
std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_; }
std::vector<float> const& Values() const { return cut_values_; }
std::vector<float> const& MinValues() const { return min_vals_; }
size_t TotalBins() const { return cut_ptrs_.back(); }
BinIdx SearchBin(float value, uint32_t column_id) {
auto beg = cut_ptrs_.at(column_id);
auto end = cut_ptrs_.at(column_id + 1);
auto it = std::upper_bound(cut_values_.cbegin() + beg, cut_values_.cbegin() + end, value);
if (it == cut_values_.cend()) {
it = cut_values_.cend() - 1;
}
BinIdx idx = it - cut_values_.cbegin();
return idx;
}
BinIdx SearchBin(Entry const& e) {
return SearchBin(e.fvalue, e.index);
}
};
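// Illustrative example (not in the original header): with a single feature
// whose cuts are cut_ptrs_ = {0, 3} and cut_values_ = {0.5, 1.5, 2.5},
// SearchBin(1.0, 0) runs upper_bound over {0.5, 1.5, 2.5}, lands on 1.5 and
// returns bin 1 -- the first cut strictly greater than the value -- while a
// value beyond the last cut (e.g. 3.0) is clamped into the last bin, 2.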
/* \brief An interface for building quantile cuts.
 *
 * `DenseCuts' always assumes there are `max_bins` cuts for each feature, which
 * makes it unsuitable for sparse datasets. On the other hand `SparseCuts' uses
 * `GetColumnBatches', which doubles the memory usage, hence it cannot be
 * applied to dense datasets.
 */
class CutsBuilder {
public:
using WXQSketch = common::WXQuantileSketch<bst_float, bst_float>;
protected:
HistogramCuts* p_cuts_;
/* \brief return whether group for ranking is used. */
static bool UseGroup(DMatrix* dmat);
public:
explicit CutsBuilder(HistogramCuts* p_cuts) : p_cuts_{p_cuts} {}
virtual ~CutsBuilder() = default;
static uint32_t SearchGroupIndFromRow(
std::vector<bst_uint> const& group_ptr, size_t const base_rowid) {
using KIt = std::vector<bst_uint>::const_iterator;
KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid);
// Cannot use CHECK_NE because it will try to print the iterator.
bool const found = res != group_ptr.cend() - 1;
if (!found) {
LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!";
}
uint32_t group_ind = std::distance(group_ptr.cbegin(), res);
return group_ind;
}
void AddCutPoint(WXQSketch::SummaryContainer const& summary) {
if (summary.size > 1 && summary.size <= 16) {
/* specialized code for categorical / ordinal data -- use midpoints */
for (size_t i = 1; i < summary.size; ++i) {
bst_float cpt = (summary.data[i].value + summary.data[i - 1].value) / 2.0f;
if (i == 1 || cpt > p_cuts_->cut_values_.back()) {
p_cuts_->cut_values_.push_back(cpt);
}
}
} else {
for (size_t i = 2; i < summary.size; ++i) {
bst_float cpt = summary.data[i - 1].value;
if (i == 2 || cpt > p_cuts_->cut_values_.back()) {
p_cuts_->cut_values_.push_back(cpt);
}
}
}
}
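// Illustrative example (not in the original header): a summary holding the
// distinct values {1, 2, 3} (size <= 16) yields midpoint cuts {1.5, 2.5},
// which separate each category exactly; a larger summary instead keeps the
// sketch values themselves from the second entry on, deduplicated against
// the running back() of cut_values_.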
/* \brief Build histogram indices. */
virtual void Build(DMatrix* dmat, uint32_t const max_num_bins) = 0;
};
/*! \brief Cut configuration for sparse dataset. */
class SparseCuts : public CutsBuilder {
/* \brief Distribute columns to each thread according to number of entries. */
static std::vector<size_t> LoadBalance(SparsePage const& page, size_t const nthreads);
Monitor monitor_;
public:
explicit SparseCuts(HistogramCuts* container) :
CutsBuilder(container) {
monitor_.Init(__FUNCTION__);
}
/* \brief Concatenate the built cuts in each thread. */
void Concat(std::vector<std::unique_ptr<SparseCuts>> const& cuts, uint32_t n_cols);
/* \brief Build histogram indices in single thread. */
void SingleThreadBuild(SparsePage const& page, MetaInfo const& info,
uint32_t max_num_bins,
bool const use_group_ind,
uint32_t beg, uint32_t end, uint32_t thread_id);
void Build(DMatrix* dmat, uint32_t const max_num_bins) override;
};
/*! \brief Cut configuration for dense dataset. */
class DenseCuts : public CutsBuilder {
protected:
Monitor monitor_;
public:
explicit DenseCuts(HistogramCuts* container) :
CutsBuilder(container) {
monitor_.Init(__FUNCTION__);
}
void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins);
void Build(DMatrix* p_fmat, uint32_t max_num_bins) override;
};
// FIXME(trivialfis): Merge this into generic cut builder.
/*! \brief Builds the cut matrix on the GPU.
*
* \return The row stride across the entire dataset.
*/
size_t DeviceSketch
(const tree::TrainParam& param, const GenericParameter &learner_param, int gpu_batch_nrows,
DMatrix* dmat, HistogramCuts* hmat);
/*!
* \brief preprocessed global index matrix, in CSR format
* Transform floating values to integer index in histogram
* This is a global histogram index.
*/
struct GHistIndexMatrix {
/*! \brief row pointer to rows by element position */
// std::vector<size_t> row_ptr;
SimpleArray<size_t> row_ptr;
/*! \brief The index data */
SimpleArray<uint32_t> index;
/*! \brief hit count of each index */
std::vector<size_t> hit_count;
/*! \brief The corresponding cuts */
HistogramCuts cut;
// Create a global histogram matrix, given cut
void Init(DMatrix* p_fmat, int max_num_bins);
// get i-th row
inline GHistIndexRow operator[](size_t i) const {
return {&index[0] + row_ptr[i],
static_cast<GHistIndexRow::index_type>(
row_ptr[i + 1] - row_ptr[i])};
}
inline void GetFeatureCounts(size_t* counts) const {
auto nfeature = cut.Ptrs().size() - 1;
for (unsigned fid = 0; fid < nfeature; ++fid) {
auto ibegin = cut.Ptrs()[fid];
auto iend = cut.Ptrs()[fid + 1];
for (auto i = ibegin; i < iend; ++i) {
counts[fid] += hit_count[i];
}
}
}
private:
std::vector<size_t> hit_count_tloc_;
};
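// Illustrative layout (not in the original header): for two rows whose
// entries fall into global bins {0, 3} and {1}, the CSR pair is
//   row_ptr = {0, 2, 3};  index = {0, 3, 1};
// so (*this)[1] spans index[2..3), and hit_count[1] records how many entries
// of the whole matrix landed in bin 1.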
struct GHistIndexBlock {
const size_t* row_ptr;
const uint32_t* index;
inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
: row_ptr(row_ptr), index(index) {}
// get i-th row
inline GHistIndexRow operator[](size_t i) const {
return {&index[0] + row_ptr[i], detail::ptrdiff_t(row_ptr[i + 1] - row_ptr[i])};
}
};
class ColumnMatrix;
class GHistIndexBlockMatrix {
public:
void Init(const GHistIndexMatrix& gmat,
const ColumnMatrix& colmat,
const tree::TrainParam& param);
inline GHistIndexBlock operator[](size_t i) const {
return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
}
inline size_t GetNumBlock() const {
return blocks_.size();
}
private:
std::vector<size_t> row_ptr_;
std::vector<uint32_t> index_;
const HistogramCuts* cut_;
struct Block {
const size_t* row_ptr_begin;
const size_t* row_ptr_end;
const uint32_t* index_begin;
const uint32_t* index_end;
};
std::vector<Block> blocks_;
};
/*!
 * \brief Used instead of GradStats to store float rather than double in reduced
 * histograms. This improves performance by 10-30% and halves the memory
 * consumption of the histograms, while the accuracy is the same in both cases.
 */
struct GradStatHist {
typedef float GradType;
/*! \brief sum gradient statistics */
GradType sum_grad;
/*! \brief sum hessian statistics */
GradType sum_hess;
GradStatHist() : sum_grad{0}, sum_hess{0} {
static_assert(sizeof(GradStatHist) == 8,
"Size of GradStatHist is not 8 bytes.");
}
inline void Add(const GradStatHist& b) {
sum_grad += b.sum_grad;
sum_hess += b.sum_hess;
}
inline void Add(const tree::GradStats& b) {
sum_grad += b.sum_grad;
sum_hess += b.sum_hess;
}
inline void Add(const GradientPair& p) {
this->Add(p.GetGrad(), p.GetHess());
}
inline void Add(const GradType& grad, const GradType& hess) {
sum_grad += grad;
sum_hess += hess;
}
inline tree::GradStats ToGradStat() const {
return tree::GradStats(sum_grad, sum_hess);
}
inline void SetSubstract(const GradStatHist& a, const GradStatHist& b) {
sum_grad = a.sum_grad - b.sum_grad;
sum_hess = a.sum_hess - b.sum_hess;
}
inline void SetSubstract(const tree::GradStats& a, const GradStatHist& b) {
sum_grad = a.sum_grad - b.sum_grad;
sum_hess = a.sum_hess - b.sum_hess;
}
inline GradType GetGrad() const { return sum_grad; }
inline GradType GetHess() const { return sum_hess; }
inline static void Reduce(GradStatHist& a, const GradStatHist& b) { // NOLINT(*)
a.Add(b);
}
};
using GHistRow = Span<GradStatHist>;
/*!
* \brief histogram of gradient statistics for multiple nodes
*/
class HistCollection {
public:
// access histogram for i-th node
inline GHistRow operator[](bst_uint nid) {
AddHistRow(nid);
return { const_cast<GradStatHist*>(dmlc::BeginPtr(data_arr_[nid])), nbins_};
}
// have we computed a histogram for i-th node?
inline bool RowExists(bst_uint nid) const {
return nid < data_arr_.size();
}
// initialize histogram collection
inline void Init(uint32_t nbins) {
if (nbins_ != nbins) {
data_arr_.clear();
nbins_ = nbins;
}
}
// create an empty histogram for i-th node
inline void AddHistRow(bst_uint nid) {
if (data_arr_.size() <= nid) {
size_t prev = data_arr_.size();
data_arr_.resize(nid + 1);
for (size_t i = prev; i < data_arr_.size(); ++i) {
data_arr_[i].resize(nbins_);
}
}
}
private:
/*! \brief number of all bins over all features */
uint32_t nbins_ = 0;
std::vector<std::vector<GradStatHist>> data_arr_;
};
/*!
* \brief builder for histograms of gradient statistics
*/
class GHistBuilder {
public:
// initialize builder
inline void Init(size_t nthread, uint32_t nbins) {
nthread_ = nthread;
nbins_ = nbins;
}
void BuildBlockHist(const std::vector<GradientPair>& gpair,
const RowSetCollection::Elem row_indices,
const GHistIndexBlockMatrix& gmatb,
GHistRow hist) {
constexpr int kUnroll = 8; // loop unrolling factor
const int32_t nblock = gmatb.GetNumBlock();
const size_t nrows = row_indices.end - row_indices.begin;
const size_t rest = nrows % kUnroll;
#pragma omp parallel for
for (int32_t bid = 0; bid < nblock; ++bid) {
auto gmat = gmatb[bid];
for (size_t i = 0; i < nrows - rest; i += kUnroll) {
size_t rid[kUnroll];
size_t ibegin[kUnroll];
size_t iend[kUnroll];
GradientPair stat[kUnroll];
for (int k = 0; k < kUnroll; ++k) {
rid[k] = row_indices.begin[i + k];
}
for (int k = 0; k < kUnroll; ++k) {
ibegin[k] = gmat.row_ptr[rid[k]];
iend[k] = gmat.row_ptr[rid[k] + 1];
}
for (int k = 0; k < kUnroll; ++k) {
stat[k] = gpair[rid[k]];
}
for (int k = 0; k < kUnroll; ++k) {
for (size_t j = ibegin[k]; j < iend[k]; ++j) {
const uint32_t bin = gmat.index[j];
hist[bin].Add(stat[k]);
}
}
}
for (size_t i = nrows - rest; i < nrows; ++i) {
const size_t rid = row_indices.begin[i];
const size_t ibegin = gmat.row_ptr[rid];
const size_t iend = gmat.row_ptr[rid + 1];
const GradientPair stat = gpair[rid];
for (size_t j = ibegin; j < iend; ++j) {
const uint32_t bin = gmat.index[j];
hist[bin].Add(stat);
}
}
}
}
uint32_t GetNumBins() {
return nbins_;
}
private:
/*! \brief number of threads for parallel computation */
size_t nthread_;
/*! \brief number of all bins over all features */
uint32_t nbins_;
};
void BuildHistLocalDense(size_t istart, size_t iend, size_t nrows, const size_t* rid,
const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);
void BuildHistLocalSparse(size_t istart, size_t iend, size_t nrows, const size_t* rid,
const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);
void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent);
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_HIST_UTIL_H_
|
bins_dynamic_objects.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Nelson Lafontaine
// Carlos A. Roig
#if !defined(KRATOS_BINS_DYNAMIC_OBJECTS_CONTAINER_H_INCLUDED)
#define KRATOS_BINS_DYNAMIC_OBJECTS_CONTAINER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <array>
// Project includes
#include "tree.h"
#include "cell.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
template<class TConfigure>
class BinsObjectDynamic {
public:
///@name Type Definitions
///@{
enum { Dimension = TConfigure::Dimension };
typedef TConfigure Configure;
typedef typename TConfigure::PointType PointType;
typedef typename TConfigure::PointerType PointerType;
typedef typename TConfigure::ContainerType ContainerType;
typedef typename TConfigure::IteratorType IteratorType;
typedef typename TConfigure::ResultContainerType ResultContainerType;
typedef typename TConfigure::ResultIteratorType ResultIteratorType;
typedef typename TConfigure::DistanceIteratorType DistanceIteratorType;
typedef TreeNode<Dimension, PointType, PointerType, IteratorType, typename TConfigure::DistanceIteratorType> TreeNodeType;
typedef typename TreeNodeType::CoordinateType CoordinateType; // double
typedef typename TreeNodeType::SizeType SizeType; // std::size_t
typedef typename TreeNodeType::IndexType IndexType; // std::size_t
typedef Tvector<CoordinateType,Dimension> CoordinateArray;
typedef Tvector<SizeType,Dimension> SizeArray;
typedef Tvector<IndexType,Dimension> IndexArray;
///Contact Pair
typedef typename TConfigure::ContainerContactType ContainerContactType;
typedef typename TConfigure::IteratorContactType IteratorContactType;
///typedef TreeNodeType LeafType;
typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType;
typedef typename TreeNodeType::SearchStructureType SearchStructureType;
// Global Container
typedef Cell<Configure> CellType;
typedef std::vector<CellType> CellContainerType;
typedef typename CellContainerType::iterator CellContainerIterator;
/// Pointer definition of BinsObjectDynamic
KRATOS_CLASS_POINTER_DEFINITION(BinsObjectDynamic);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BinsObjectDynamic() {}
/// Bins constructor from a list of objects; the bounding box and cell size are computed automatically
/**
 * @brief Constructs a new BinsObjectDynamic
 *
 * Constructs a new BinsObjectDynamic using a list of objects and an automatically calculated cell size.
*
* @param ObjectsBegin Iterator to the first object of the bins
* @param ObjectsEnd Iterator to the last object of the bins
*/
BinsObjectDynamic (IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd)
: mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) {
mObjectsSize = SearchUtils::PointerDistance(mObjectsBegin,mObjectsEnd);
CalculateBoundingBox(); // Calculate mMinPoint, mMaxPoint
CalculateCellSize(mObjectsSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
GenerateBins(); // Fill Cells with objects
}
/**
* @brief Constructs a new BinsObjectDynamic
*
 * Constructs a new BinsObjectDynamic using a list of objects and a user-provided cell size.
*
* @param ObjectsBegin Iterator to the first object of the bins
* @param ObjectsEnd Iterator to the last object of the bins
* @param CellSize Size of the cells (equal for every dimension)
*/
BinsObjectDynamic (IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd, CoordinateType CellSize)
: mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) {
mObjectsSize = SearchUtils::PointerDistance(mObjectsBegin,mObjectsEnd);
CalculateBoundingBox(); // Calculate mMinPoint, mMaxPoint
AssignCellSize(CellSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
GenerateBins(); // Fill Cells with objects
}
/**
* @brief Constructs a new BinsObjectDynamic
*
* Constructs a new BinsObjectDynamic using a given bounding box and a user provided cell size.
*
* @param MinPoint Min point of the boundingbox containing the bins
* @param MaxPoint Max point of the boundingbox containing the bins
 * @param CellSize Size of the cells (equal for every dimension)
*/
BinsObjectDynamic (const PointType& MinPoint, const PointType& MaxPoint, CoordinateType CellSize)
: mObjectsSize(0), mObjectsBegin(0), mObjectsEnd(0) {
for(SizeType i = 0; i < Dimension; i++) {
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
AssignCellSize(CellSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
}
/**
* @brief Constructs a new BinsObjectDynamic object
*
 * Constructs a new BinsObjectDynamic using a given bounding box and a provided approximation
 * of the number of objects that will be added to the bins.
*
* @param MinPoint Min point of the boundingbox containing the bins
* @param MaxPoint Max point of the boundingbox containing the bins
* @param NumPoints Expected number of elements in the bins
*/
BinsObjectDynamic (const PointType& MinPoint, const PointType& MaxPoint, SizeType NumPoints)
: mObjectsSize(0), mObjectsBegin(0), mObjectsEnd(0) {
for(SizeType i = 0; i < Dimension; i++) {
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
CalculateCellSize(NumPoints); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
}
/// Destructor.
virtual ~BinsObjectDynamic() {}
/// Single search API
/**
* [SearchObjects description]
* @param ThisObject [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjects(PointerType& ThisObject, ResultContainerType& Result) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInBoxLocal(ThisObject, Result, Box );
return Result.size();
}
/**
* [SearchObjects description]
* @param ThisObject [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjects(PointerType& ThisObject, ResultIteratorType& Result, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInBoxLocal(ThisObject, Result, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInCell description]
* @param ThisPoint [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjectsInCell(const PointType& ThisPoint, ResultIteratorType Result) {
/// Missing API for 'SearchObjectsInCell' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInCell(PointerType, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInCell description]
* @param ThisPoint [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInCell(const PointType& ThisPoint, ResultIteratorType Result, const SizeType& MaxNumberOfResults) {
IndexType icell = CalculateIndex(ThisPoint);
if(mCells[icell].Size() < MaxNumberOfResults) {
for(IteratorType i_object = mCells[icell].Begin() ; i_object != mCells[icell].End(); i_object++, Result++) {
*Result = *i_object;
}
return mCells[icell].Size();
} else {
return std::numeric_limits<SizeType>::max();
}
}
/**
* [SearchObjectsExclusive description]
* @param ThisObject [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjectsExclusive(PointerType& ThisObject, ResultIteratorType& Result) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchObjectLocalExclusive(ThisObject, Result, Box );
return Result.size();
}
/**
* [SearchObjectsExclusive description]
* @param ThisObject [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsExclusive(PointerType& ThisObject, ResultIteratorType& Result, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchObjectLocalExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results) {
/// Missing API for 'SearchObjectsInRadius' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadius(PointerType, const double, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObject, Radius, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances) {
/// Missing API for 'SearchObjectsInRadius' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadius(PointerType, const double, ResultIteratorType, DistanceIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObject, Radius, Results, ResultDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results) {
/// Missing API for 'SearchObjectsInRadiusExclusive' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadiusExclusive(PointerType, const double, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObject, Radius, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances) {
/// Missing API for 'SearchObjectsInRadiusExclusive' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadiusExclusive(PointerType, const double, ResultIteratorType, DistanceIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObject, Radius, Results, ResultDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/// Batch search API (needs to be extended with the missing functions)
/**
* [SearchObjectsInRadius description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
void SearchObjectsInRadius(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
struct tls_type
{
PointType Low;
PointType High;
SearchStructureType Box;
};
IndexPartition<std::size_t>(NumberOfObjects).for_each(tls_type(), [&](std::size_t i, tls_type& rTLS){
ResultIteratorType ResultsPointer = Results[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], rTLS.Low, rTLS.High, Radius[i]);
rTLS.Box.Set( CalculateCell(rTLS.Low), CalculateCell(rTLS.High), mN );
SearchInRadius(ThisObjects[i], Radius[i], ResultsPointer, NumberOfResults[i], MaxNumberOfResults, rTLS.Box );
});
}
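// The thread-local `tls_type` above lets each worker reuse its Low/High
// points and search box across all of its iterations instead of
// reconstructing them per object, mirroring what the raw OpenMP variants
// below achieve with `private(Low,High,Box)`.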
/**
* [SearchObjectsInRadius description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param ResultsDistances [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
void SearchObjectsInRadius(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
DistanceIteratorType ResultsDistancesPointer = ResultsDistances[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObjects[i], Radius[i], ResultsPointer, ResultsDistancesPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
virtual void SearchObjectsInRadiusExclusive(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObjects[i], Radius[i], ResultsPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param ResultsDistances [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
virtual void SearchObjectsInRadiusExclusive(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
DistanceIteratorType ResultsDistancesPointer = ResultsDistances[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObjects[i], Radius[i], ResultsPointer, ResultsDistancesPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/// Contact search API
/**
* [SearchContact description]
 * NOTE[Charlie]: Why does this function not return the number of results like the others?
* @param Result [description]
*/
void SearchContact(ContainerContactType& Result) {
for (CellContainerIterator icell = mCells.begin() ; icell!= mCells.end(); icell++) {
icell->SearchContact(Result);
}
}
/**
* [SearchContact description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchContact(IteratorContactType& Result, const SizeType& MaxNumberOfResults ) {
SizeType NumberOfResults = 0;
for (CellContainerIterator icell = mCells.begin() ; icell!= mCells.end(); icell++) {
icell->SearchContact(Result, NumberOfResults, MaxNumberOfResults);
}
return NumberOfResults;
}
/// Add/Remove
/**
* [AddObject description]
* @param ThisObject [description]
*/
virtual void AddObject(const PointerType& ThisObject) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
FillObject(Box,ThisObject);
mObjectsSize++;
}
/**
* [RemoveObject description]
* @param ThisObject [description]
*/
void RemoveObject(const PointerType& ThisObject) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
RemoveObjectLocal(Box,ThisObject);
mObjectsSize--;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/** Calculates the IndexArray (x[,y[,z]]) of the provided object.
* Calculates the IndexArray (x[,y[,z]]) of the provided object.
* The provided object must provide its coordinates through the [] operator.
* @param ThisObject Input Object
* @return Cell coordinates of 'ThisObject' in the bins
*/
template<class GenericCoordType>
IndexArray CalculateCell(const GenericCoordType& ThisObject) {
IndexArray IndexCell;
for(SizeType i = 0 ; i < Dimension ; i++) {
IndexCell[i] = CalculatePosition(ThisObject[i],i);
}
return IndexCell;
}
/** Calculates the Index of the provided object.
* Calculates the Index of the provided object.
* The provided object must provide its coordinates through the [] operator.
* @param ThisObject Input Object
* @return Cell index of 'ThisObject' in the bins
*/
template<class GenericCoordType>
IndexType CalculateIndex(const GenericCoordType& ThisObject) {
IndexType Index = 0;
for(SizeType iDim = Dimension-1 ; iDim > 0 ; iDim--) {
Index += CalculatePosition(ThisObject[iDim],iDim);
Index *= mN[iDim-1];
}
Index += CalculatePosition(ThisObject[0],0);
return Index;
}
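/** Worked example (illustrative): in 3D with divisions mN = {nx, ny, nz},
 * the loop above folds the cell coordinates (x, y, z) row-major style into
 *   Index = x + nx * (y + ny * z)
 * so consecutive cells along the first dimension are contiguous in mCells.
 */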
/**
* [CalculatePosition description]
* @param ThisCoord [description]
* @param ThisDimension [description]
* @return [description]
*/
virtual IndexType CalculatePosition(CoordinateType const& ThisCoord, const SizeType& ThisDimension) {
CoordinateType d_index = (ThisCoord - mMinPoint[ThisDimension]) * mInvCellSize[ThisDimension];
IndexType index = static_cast<IndexType>( (d_index < 0.00) ? 0.00 : d_index );
return (index > mN[ThisDimension]-1) ? mN[ThisDimension]-1 : index;
}
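/** Worked example (illustrative): with mMinPoint[d] = 0.0, a cell size of
 * 0.5 (mInvCellSize[d] = 2.0) and mN[d] = 4, a coordinate of 1.3 gives
 * d_index = 2.6 -> cell 2, while -0.2 and 7.0 are clamped to cells 0 and 3.
 */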
///@}
///@name Access
///@{
/**
* @brief Get the Cell Container object
*
* @return CellContainerType& The Cell Container object
*/
CellContainerType& GetCellContainer() {
return mCells;
}
/**
* @brief Get the Divisions object
*
* @return SizeArray& Array containing the number of Cells in each dimension
*/
SizeArray& GetDivisions() {
return mN;
}
/**
* @brief Get the Cell Size object
*
* @return CoordinateArray& Array containing the size of the Cell in each dimension
*/
CoordinateArray& GetCellSize() {
return mCellSize;
}
/**
* @brief Get the Min Point object
*
* @return PointType& Min point of the bins
*/
PointType& GetMinPoint() {
return mMinPoint;
}
/**
* @brief Get the Max Point object
*
* @return PointType& Max point of the bins
*/
PointType& GetMaxPoint() {
return mMaxPoint;
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const {
return "BinsObjectDynamic" ;
}
/** Print information about this object.
* Print information about this object.
* @param rOStream [description]
*/
virtual void PrintInfo(std::ostream& rOStream) const {
rOStream << Info();
}
/** Print object's data.
* Print object's data.
* @param rOStream [description]
* @param Perfix [description]
*/
virtual void PrintData(std::ostream& rOStream, std::string const& Perfix = std::string()) const {
rOStream << " BinsSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rOStream << "[" << mN[i] << "]";
}
rOStream << std::endl;
rOStream << " CellSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rOStream << "[" << mCellSize[i] << "]";
}
rOStream << std::endl;
SizeType nn = 0;
for(SizeType i = 0 ; i < mCells.size(); i++) {
nn += mCells[i].Size();
}
rOStream << "NumPointers: " << nn << std::endl;
}
/** Print Size of Container
* Print Size of Container
* @param rout [description]
*/
void PrintSize(std::ostream& rout) {
rout << " BinsSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rout << "[" << mN[i] << "]";
}
rout << std::endl;
}
/** Print Limits Points of the Container
* Print Limits Points of the Container
* @param rout [description]
*/
void PrintBox(std::ostream& rout) {
rout << " BinsBox: Min [";
mMinPoint.Print(rout);
rout << "]; Max [";
mMaxPoint.Print(rout);
rout << "]; Size [";
mCellSize.Print(rout);
rout << "]" << std::endl;
}
protected:
///@}
///@name Friends
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
/// It computes each object's bounding box and uses it to find the max and min points
virtual void CalculateBoundingBox()
{
PointType Low, High;
TConfigure::CalculateBoundingBox(*mObjectsBegin,mMinPoint,mMaxPoint);
#ifdef _OPENMP
SizeType number_of_threads = omp_get_max_threads();
#else
SizeType number_of_threads = 1;
#endif
// NOTE: the per-thread partitions and Max/Min copies below are set up but
// the reduction loop that follows runs serially and never consumes them.
std::vector<SizeType> node_partition;
CreatePartition(number_of_threads, mObjectsSize, node_partition);
std::vector<PointType> Max(number_of_threads);
std::vector<PointType> Min(number_of_threads);
for(SizeType k=0; k<number_of_threads; k++ )
{
Max[k] = mMaxPoint;
Min[k] = mMinPoint;
}
IteratorType i_begin = mObjectsBegin;
IteratorType i_end = mObjectsEnd;
for (IteratorType i_object = i_begin ; i_object != i_end ; i_object++ )
{
TConfigure::CalculateBoundingBox(*i_object, Low, High);
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMaxPoint[i] = (mMaxPoint[i] < High[i]) ? High[i] : mMaxPoint[i];
mMinPoint[i] = (mMinPoint[i] > Low[i]) ? Low[i] : mMinPoint[i];
}
}
auto Epsilon = PointType{mMaxPoint - mMinPoint};
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMaxPoint[i] += Epsilon[i] * 0.01;
mMinPoint[i] -= Epsilon[i] * 0.01;
}
}
/**
* @brief Calculates the cell size of the bins.
*
 * Calculates the cell size of the bins using an average approximation of the objects in the bins.
 *
 * @param ApproximatedSize Approximate number of objects that will be stored in the bins
*/
void CalculateCellSize(std::size_t ApproximatedSize)
{
std::size_t average_number_of_cells = static_cast<std::size_t>(std::pow(static_cast<double>(ApproximatedSize), 1.00 / Dimension));
std::array<double, 3> lengths;
double average_length = 0.00;
for (int i = 0; i < Dimension; i++) {
lengths[i] = mMaxPoint[i] - mMinPoint[i];
average_length += lengths[i];
}
average_length *= 1.00 / 3.00;
if (average_length < std::numeric_limits<double>::epsilon()) {
for(int i = 0; i < Dimension; i++) {
mN[i] = 1;
}
return;
}
for (int i = 0; i < Dimension; i++) {
mN[i] = static_cast<std::size_t>(lengths[i] / average_length * (double)average_number_of_cells) + 1;
if (mN[i] > 1) {
mCellSize[i] = lengths[i] / mN[i];
} else {
mCellSize[i] = average_length;
}
mInvCellSize[i] = 1.00 / mCellSize[i];
}
}
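/** Worked example (illustrative): for ApproximatedSize = 1000 objects in 3D,
 * average_number_of_cells = 1000^(1/3) = 10; with box lengths {2, 1, 1} the
 * average length is 4/3 (note the division by 3 is hard-coded regardless of
 * Dimension), giving mN = {16, 8, 8} and mCellSize[i] = lengths[i] / mN[i].
 */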
/**
* @brief Assigns the cell size of the bins using the provided CellSize.
*
* Assigns the cell size of the bins using the provided CellSize.
*
* @param CellSize Desired size of the cells.
*/
void AssignCellSize(CoordinateType CellSize)
{
for(SizeType i = 0 ; i < Dimension ; i++)
{
mCellSize[i] = CellSize;
mInvCellSize[i] = 1.00 / mCellSize[i];
mN[i] = static_cast<SizeType>( (mMaxPoint[i]-mMinPoint[i]) / mCellSize[i]) + 1;
}
}
virtual void GenerateBins()
{
PointType Low, High;
SearchStructureType Box;
/// Fill container with objects
for(IteratorType i_object = mObjectsBegin ; i_object != mObjectsEnd ; i_object++)
{
TConfigure::CalculateBoundingBox(*i_object, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
FillObject(Box, *i_object);
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result,
SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result);
}
}
// Dimension = 2
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result);
}
}
}
// Dimension = 3
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjects(ThisObject, Result);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result,
SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
// Dimension = 2
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
}
}
// Dimension = 3
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// Dimension = 1
void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object, MinCell, MaxCell))
mCells[I].Add(i_object);
}
}
// Dimension = 2
void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Add(i_object);
}
}
}
// Dimension = 3
virtual void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2]+=mCellSize[2], MaxCell[2]+=mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Add(i_object);
}
}
}
}
// Dimension = 1
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object, MinCell, MaxCell))
mCells[I].Remove(i_object);
}
}
// Dimension = 2
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Remove(i_object);
}
}
}
// Dimension = 3
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2]+=mCellSize[2], MaxCell[2]+=mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Remove(i_object);
}
}
}
}
void AllocateContainer()
{
SizeType Size = mN[0];
for(SizeType i = 1 ; i < Dimension ; i++)
Size *= mN[i];
mCells.resize(Size);
}
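// AllocateContainer sizes the flat cell vector as the product of the
// per-axis cell counts mN. For example (hypothetical counts), mN = {10, 20, 30}
// in 3D allocates 6000 cells, addressed through the linear index I used by the
// search loops above.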
inline void CreatePartition(SizeType number_of_threads, const SizeType number_of_rows, std::vector<SizeType>& partitions)
{
partitions.resize(number_of_threads+1);
SizeType partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(SizeType i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
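// A worked example of CreatePartition (hypothetical numbers): with
// number_of_threads = 4 and number_of_rows = 10, partition_size = 10 / 4 = 2,
// giving partitions = {0, 2, 4, 6, 10}; the integer-division remainder is
// absorbed by the last partition, so thread 3 processes 4 rows.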
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
PointType mMinPoint;
PointType mMaxPoint;
SizeType mObjectsSize;
IteratorType mObjectsBegin;
IteratorType mObjectsEnd;
CoordinateArray mCellSize;
CoordinateArray mInvCellSize;
SizeArray mN;
CellContainerType mCells; ///The bin
private:
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Inaccessible methods
///@{
///@}
public:
/// Assignment operator.
BinsObjectDynamic<TConfigure> & operator=(const BinsObjectDynamic<TConfigure> & rOther)
{
mMinPoint = rOther.mMinPoint;
mMaxPoint = rOther.mMaxPoint;
mObjectsBegin = rOther.mObjectsBegin;
mObjectsEnd = rOther.mObjectsEnd;
mObjectsSize = rOther.mObjectsSize;
mCellSize = rOther.mCellSize;
mInvCellSize = rOther.mInvCellSize;
mN = rOther.mN;
mCells = rOther.mCells;
return *this;
}
/// Copy constructor.
BinsObjectDynamic(const BinsObjectDynamic& rOther)
{
*this = rOther;
}
/// Copy constructor.
template<class T>
BinsObjectDynamic(const BinsObjectDynamic<T>& rOther)
{
*this = rOther;
}
}; // Class BinsObjectDynamic
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TConfigure>
inline std::istream& operator >> (std::istream& rIStream,
BinsObjectDynamic<TConfigure>& rThis)
{
return rIStream;
}
/// output stream function
template<class TConfigure>
inline std::ostream& operator << (std::ostream& rOStream,
const BinsObjectDynamic<TConfigure> & rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_FILENAME_H_INCLUDED defined
|
THTensorConv.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorConv.c"
#else
/*
2D Input, 2D kernel : convolve given image with the given kernel.
*/
void THTensor_(validXCorr2Dptr)(real *r_,
real alpha,
real *t_, int64_t ir, int64_t ic,
real *k_, int64_t kr, int64_t kc,
int64_t sr, int64_t sc)
{
int64_t or = (ir - kr) / sr + 1;
int64_t oc = (ic - kc) / sc + 1;
int64_t xx, yy, kx, ky;
if ((sc != 1) || (oc < 4)) {
/* regular convolution */
for(yy = 0; yy < or; yy++) {
for(xx = 0; xx < oc; xx++) {
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + yy*sr*ic + xx*sc;
real *pw_ = k_;
real sum = 0;
for(ky = 0; ky < kr; ky++) {
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[kx];
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
/* Update output */
*r_++ += alpha*sum;
}
}
} else {
/* SSE-based convolution */
for(yy = 0; yy < or; yy++) {
real *pi_ = t_ + yy*sr*ic;
real *pw_ = k_;
for (ky = 0; ky < kr; ky++) {
real *pis_ = pi_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc);
pis_++;
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
r_ += oc;
}
}
}
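/* Output-size sketch for validXCorr2Dptr, derived from the formulas above:
   or = (ir - kr)/sr + 1 and oc = (ic - kc)/sc + 1, so e.g. a 5x5 input with a
   3x3 kernel at stride 1 yields a 3x3 output, and at sr = sc = 2 a 2x2 output.
   The scalar path is taken whenever sc != 1 or oc < 4, since THVector_(cadd)
   needs unit column stride and enough output width to be worthwhile. */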
/*
2D Input, 2D kernel : convolve given image with the given kernel.
*/
void THTensor_(validConv2Dptr)(real *r_,
real alpha,
real *t_, int64_t ir, int64_t ic,
real *k_, int64_t kr, int64_t kc,
int64_t sr, int64_t sc)
{
int64_t or = (ir - kr) / sr + 1;
int64_t oc = (ic - kc) / sc + 1;
int64_t xx, yy, kx, ky;
if ((sc != 1) || (oc < 4)) {
/* regular convolution */
for(yy = 0; yy < or; yy++) {
for(xx = 0; xx < oc; xx++) {
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + yy*sr*ic + xx*sc;
real *pw_ = k_ + kr*kc - 1;
real sum = 0;
for(ky = 0; ky < kr; ky++) {
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[-kx];
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
/* Update output */
*r_++ += alpha*sum;
}
}
} else {
/* SSE-based convolution */
for(yy = 0; yy < or; yy++) {
real *pw_ = k_ + kr*kc - 1;
real *pi_ = t_ + yy*sr*ic;
for (ky = 0; ky < kr; ky++) {
real *pis_ = pi_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc);
pis_++;
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
r_ += oc;
}
}
}
/*
2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
*/
void THTensor_(fullConv2Dptr)(real *r_,
real alpha,
real *t_, int64_t ir, int64_t ic,
real *k_, int64_t kr, int64_t kc,
int64_t sr, int64_t sc)
{
int64_t oc = (ic - 1) * sc + kc;
int64_t xx, yy, kx, ky;
if ((sc != 1) || (ic < 4)) {
/* regular convolution */
for(yy = 0; yy < ir; yy++) {
for(xx = 0; xx < ic; xx++) {
/* Outer product in two dimensions... (between input image and the mask) */
real *po_ = r_ + yy*sr*oc + xx*sc;
real *pw_ = k_;
for(ky = 0; ky < kr; ky++)
{
real z = *t_ * alpha;
for(kx = 0; kx < kc; kx++) {
po_[kx] += z * pw_[kx];
}
po_ += oc; /* next input line */
pw_ += kc; /* next mask line */
}
t_++;
}
}
} else {
/* SSE-based convolution */
for(yy = 0; yy < ir; yy++) {
real *po_ = r_ + yy*sr*oc;
real *pw_ = k_;
for (ky = 0; ky < kr; ky++) {
real *pos_ = po_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic);
pos_++;
}
po_ += oc; /* next input line */
pw_ += kc; /* next mask line */
}
t_ += ic;
}
}
}
/*
2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
*/
void THTensor_(fullXCorr2Dptr)(real *r_,
real alpha,
real *t_, int64_t ir, int64_t ic,
real *k_, int64_t kr, int64_t kc,
int64_t sr, int64_t sc)
{
int64_t oc = (ic - 1) * sc + kc;
int64_t xx, yy, kx, ky;
if ((sc != 1) || (ic < 4)) {
/* regular convolution */
for(yy = 0; yy < ir; yy++) {
for(xx = 0; xx < ic; xx++) {
/* Outer product in two dimensions... (between input image and the mask) */
real *po_ = r_ + yy*sr*oc + xx*sc;
real *pw_ = k_ + kr*kc -1;
for(ky = 0; ky < kr; ky++)
{
real z = *t_ * alpha;
for(kx = 0; kx < kc; kx++) {
po_[kx] += z * pw_[-kx];
}
po_ += oc; /* next input line */
pw_ -= kc; /* next mask line */
}
t_++;
}
}
} else {
/* SSE-based convolution */
for(yy = 0; yy < ir; yy++) {
real *po_ = r_ + yy*sr*oc;
real *pw_ = k_ + kr*kc -1;
for (ky = 0; ky < kr; ky++) {
real *pos_ = po_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic);
pos_++;
}
po_ += oc; /* next input line */
pw_ -= kc; /* next mask line */
}
t_ += ic;
}
}
}
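/* Output-size sketch for the full modes above: oc = (ic - 1)*sc + kc, so a
   5x5 input with a 3x3 kernel at stride 1 produces a 7x7 output. Full mode
   scatters each input element over the whole kernel footprint, which is why
   these loops run over the input extent (ir, ic) rather than the output. */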
/*
2D Input, 2D kernel : convolve given image with the given kernel, valid convolution.
for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for
calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
*/
void THTensor_(validXCorr2DRevptr)(real *r_,
real alpha,
real *t_, int64_t ir, int64_t ic,
real *k_, int64_t kr, int64_t kc,
int64_t sr, int64_t sc)
{
int64_t or = ir - (kr - 1) * sr;
int64_t oc = ic - (kc - 1) * sc;
int64_t xx, yy, kx, ky;
if ((sc != 1) || (kc < 4)) {
/* regular convolution */
for(yy = 0; yy < kr; yy++) {
for(xx = 0; xx < kc; xx++) {
real *po_ = r_;
real *pi_ = t_ + yy*sr*ic + xx*sc;
real z = *k_++ * alpha;
for(ky = 0; ky < or; ky++) {
for(kx = 0; kx < oc; kx++)
po_[kx] += z * pi_[kx];
pi_ += ic;
po_ += oc;
}
}
}
} else {
/* SSE-based convolution */
for(yy = 0; yy < kr; yy++) {
for(xx = 0; xx < kc; xx++) {
real *po_ = r_;
real *pi_ = t_ + yy*sr*ic + xx*sc;
real z = *k_++ * alpha;
for(ky = 0; ky < or; ky++) {
THVector_(cadd)(po_, po_, pi_, z, oc);
pi_ += ic;
po_ += oc;
}
}
}
}
}
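/* Worked gradient example for validXCorr2DRevptr (hypothetical sizes): a
   forward pass of a 3x3 kernel at stride 2 over a 5x5 input yields a 2x2
   output. In the backward pass, t_ is the 5x5 input, k_ is the 2x2 gradient
   of the output, and sr = sc = 2, giving or = 5 - (2-1)*2 = 3 and oc = 3:
   exactly the 3x3 shape of the kernel gradient being accumulated into r_. */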
/*
3D Input, 3D kernel : convolve given volume with the given kernel.
*/
void THTensor_(validXCorr3Dptr)(real *r_,
real alpha,
real *t_, int64_t it, int64_t ir, int64_t ic,
real *k_, int64_t kt, int64_t kr, int64_t kc,
int64_t st, int64_t sr, int64_t sc)
{
int64_t ot = (it - kt) / st + 1;
int64_t or = (ir - kr) / sr + 1;
int64_t oc = (ic - kc) / sc + 1;
int64_t zz, xx, yy;
for (zz = 0; zz < ot; zz++)
{
for(yy = 0; yy < or; yy++)
{
for(xx = 0; xx < oc; xx++)
{
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
real *pw_ = k_;
real sum = 0;
int64_t kz, kx, ky;
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[kx];
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
pi_ += (ir-kr)*ic; /* next input slice */
}
/* Update output */
*r_++ += sum*alpha;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel.
*/
void THTensor_(validConv3Dptr)(real *r_,
real alpha,
real *t_, int64_t it, int64_t ir, int64_t ic,
real *k_, int64_t kt, int64_t kr, int64_t kc,
int64_t st, int64_t sr, int64_t sc)
{
int64_t ot = (it - kt) / st + 1;
int64_t or = (ir - kr) / sr + 1;
int64_t oc = (ic - kc) / sc + 1;
int64_t zz, xx, yy;
for(zz = 0; zz < ot; zz++)
{
for(yy = 0; yy < or; yy++)
{
for(xx = 0; xx < oc; xx++)
{
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
real *pw_ = k_ + kt*kr*kc - 1;
real sum = 0;
int64_t kz, kx, ky;
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[-kx];
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
pi_ += (ir-kr)*ic; /* next input slice */
}
/* Update output */
*r_++ += alpha*sum;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
*/
void THTensor_(fullConv3Dptr)(real *r_,
real alpha,
real *t_, int64_t it, int64_t ir, int64_t ic,
real *k_, int64_t kt, int64_t kr, int64_t kc,
int64_t st, int64_t sr, int64_t sc)
{
int64_t or = (ir - 1) * sr + kr;
int64_t oc = (ic - 1) * sc + kc;
int64_t zz, xx, yy;
for(zz = 0; zz < it; zz++)
{
for(yy = 0; yy < ir; yy++)
{
for(xx = 0; xx < ic; xx++)
{
/* Outer product in two dimensions... (between input image and the mask) */
real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
real *pw_ = k_;
int64_t kz, kx, ky;
/* printf("Output Plane : %ld,%ld,%ld, input val=%g\n",zz,yy,xx,*t_); */
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
real z = *t_ * alpha;
for(kx = 0; kx < kc; kx++) {
/* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */
po_[kx] += z * pw_[kx];
/* printf("o=%g " , po_[kx]); */
}
/* printf("\n"); */
po_ += oc; /* next input line */
pw_ += kc; /* next mask line */
}
po_ += (or-kr)*oc; /* next output slice */
/* printf("\n"); */
}
t_++;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
*/
void THTensor_(fullXCorr3Dptr)(real *r_,
real alpha,
real *t_, int64_t it, int64_t ir, int64_t ic,
real *k_, int64_t kt, int64_t kr, int64_t kc,
int64_t st, int64_t sr, int64_t sc)
{
int64_t or = (ir - 1) * sr + kr;
int64_t oc = (ic - 1) * sc + kc;
int64_t zz, xx, yy;
for(zz = 0; zz < it; zz++)
{
for(yy = 0; yy < ir; yy++)
{
for(xx = 0; xx < ic; xx++)
{
/* Outer product in two dimensions... (between input image and the mask) */
real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
real *pw_ = k_ + kt*kr*kc -1;
int64_t kz, kx, ky;
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
real z = *t_ * alpha;
for(kx = 0; kx < kc; kx++) {
po_[kx] += z * pw_[-kx];
}
po_ += oc; /* next input line */
pw_ -= kc; /* next mask line */
}
po_ += (or-kr)*oc; /* next output slice */
}
t_++;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel, valid convolution.
for st,sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for
calculating derivatives wrt a kernel that is applied with stride st,sr,sc != 1
*/
void THTensor_(validXCorr3DRevptr)(real *r_,
real alpha,
real *t_, int64_t it, int64_t ir, int64_t ic,
real *k_, int64_t kt, int64_t kr, int64_t kc,
int64_t st, int64_t sr, int64_t sc)
{
int64_t ot = it - (kt - 1) * st;
int64_t or = ir - (kr - 1) * sr;
int64_t oc = ic - (kc - 1) * sc;
int64_t zz, xx, yy;
for(zz = 0; zz < kt; zz++)
{
for(yy = 0; yy < kr; yy++)
{
for(xx = 0; xx < kc; xx++)
{
real *po_ = r_;
real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
real z = *k_++ * alpha;
int64_t kz, kx, ky;
for(kz = 0; kz < ot; kz++)
{
for(ky = 0; ky < or; ky++)
{
for(kx = 0; kx < oc; kx++)
po_[kx] += z * pi_[kx];
pi_ += ic;
po_ += oc;
}
pi_ += (ir-or)*ic; /* next input slice */
}
}
}
}
}
void THTensor_(conv2d)(real* output_data,
real alpha,
real* ptr_input, int64_t nInputRows, int64_t nInputCols,
real* ptr_weight, int64_t nKernelRows, int64_t nKernelCols,
int64_t srow, int64_t scol,
const char *vf, const char *xc)
{
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
if (*vf == 'F')
if (*xc == 'X')
THTensor_(fullXCorr2Dptr)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(fullConv2Dptr)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
if (*xc == 'X')
THTensor_(validXCorr2Dptr)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(validConv2Dptr)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
}
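/* Minimal usage sketch for the dispatcher above (hypothetical buffers; real
   code would obtain the pointers via THTensor_(data)):

     real in[5*5] = {0}, kw[3*3] = {0}, out[3*3] = {0};
     THTensor_(conv2d)(out, 1, in, 5, 5, kw, 3, 3, 1, 1, "V", "X");

   "V"/"F" selects valid vs. full output size and "X"/"C" selects
   cross-correlation vs. true (kernel-flipped) convolution. The low-level
   kernels accumulate into the output, so the caller must initialize out. */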
void THTensor_(conv3d)(real* output_data,
real alpha,
real* ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols,
real* ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols,
int64_t sdepth, int64_t srow, int64_t scol,
const char *vf, const char *xc)
{
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
if (*vf == 'F')
if (*xc == 'X')
THTensor_(fullXCorr3Dptr)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol);
else
THTensor_(fullConv3Dptr)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol);
else
if (*xc == 'X')
THTensor_(validXCorr3Dptr)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol);
else
THTensor_(validConv3Dptr)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol);
}
int64_t THTensor_(convsize)(int64_t x, int64_t k, int64_t s, const char* vf)
{
THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'");
if (*vf == 'V')
return (x-k)/s + 1;
else
return (x-1)*s + k;
}
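/* Worked examples for convsize: with x = 5, k = 3, s = 1, 'V' gives
   (5-3)/1 + 1 = 3 and 'F' gives (5-1)*1 + 3 = 7, matching the inline output
   size computations used by the conv2D and conv3D wrappers below. */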
/*
3D input, 3D kernel, 4D output
like rank1 update
A <- xx' + beta*A
for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
*/
void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelPlane, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
nKernelPlane = kernel->size[0];
nKernelRows = kernel->size[1];
nKernelCols = kernel->size[2];
nOutputPlane = nInputPlane * kernel->size[0];
THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel");
nOutputRows = nInputRows - (nKernelRows - 1) * srow;
nOutputCols = nInputCols - (nKernelCols - 1) * scol;
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
/*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
else if (beta != 1)
{
/*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
#pragma omp parallel for private(k)
for(k = 0; k < nKernelPlane; k++)
{
int64_t i;
/* get kernel */
real *ptr_weight = weight_data+k*kstride0;
for(i = 0; i < nInputPlane; i++)
{
/* get output */
real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
/* get input */
real *ptr_input = input_data+i*istride0;
/* do image, kernel convolution */
THTensor_(validXCorr2DRevptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
/* Next output plane */
/* output_data += nOutputCols*nOutputRows; */
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output (batched)
like rank1 update
A <- xx' + beta*A
for sr,sc=1 this is equivalent to conv2DRevger applied per batch element, but
otherwise it is useful for calculating derivatives wrt a kernel that is
applied with stride sr,sc != 1
*/
void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
{
int64_t nbatch, nInputPlane, nInputRows, nInputCols;
int64_t nKernelPlane, nKernelRows, nKernelCols;
int64_t nOutputRows, nOutputCols;
int64_t istride0, kstride0, istride1, kstride1;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
istride0 = input->stride[0];
istride1 = input->stride[1];
nbatch = input->size[0];
nInputPlane = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelPlane = kernel->size[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel");
THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevger : Input batch and kernel batch are not the same size");
nOutputRows = nInputRows - (nKernelRows - 1) * srow;
nOutputCols = nInputCols - (nKernelCols - 1) * scol;
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
/*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
else if (beta != 1)
{
/*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
#pragma omp parallel for private(k)
for(k = 0; k < nKernelPlane; k++)
{
int64_t i;
for(i = 0; i < nInputPlane; i++)
{
int64_t p;
for(p = 0; p < nbatch; p++)
{
/* get kernel */
real *ptr_weight = weight_data + p*kstride0 + k*kstride1;
/* get output */
real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
/* get input */
real *ptr_input = input_data + p*istride0 + i*istride1;
/* do image, kernel convolution */
THTensor_(validXCorr2DRevptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
/* Next output plane */
/* output_data += nOutputCols*nOutputRows; */
}
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 4D output
like rank1 update
A <- xx' + beta*A
*/
void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelPlane, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
nKernelPlane = kernel->size[0];
nKernelRows = kernel->size[1];
nKernelCols = kernel->size[2];
nOutputPlane = nInputPlane * kernel->size[0];
THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel");
if (*vf == 'F') {
nOutputRows = (nInputRows - 1) * srow + nKernelRows;
nOutputCols = (nInputCols - 1) * scol + nKernelCols;
} else { /* valid */
nOutputRows = (nInputRows - nKernelRows) / srow + 1;
nOutputCols = (nInputCols - nKernelCols) / scol + 1;
}
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
/*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
else if (beta != 1)
{
/*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]*r_->size[1]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
#pragma omp parallel for private(k)
for(k = 0; k < nKernelPlane; k++)
{
int64_t i;
/* get kernel */
real *ptr_weight = weight_data+k*kstride0;
for(i = 0; i < nInputPlane; i++)
{
/* get output */
real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
/* get input */
real *ptr_input = input_data+i*istride0;
/* do image, kernel convolution */
if (*vf == 'F')
if (*xc == 'X')
THTensor_(fullXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(fullConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
if (*xc == 'X')
THTensor_(validXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(validConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
/* Next output plane */
/* output_data += nOutputCols*nOutputRows; */
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
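/* Layout note for conv2Dger: r_ is resized to
   (nKernelPlane, nInputPlane, nOutputRows, nOutputCols), one output plane per
   (kernel plane, input plane) pair, which is what makes this the convolution
   analogue of the rank-1 update A <- alpha*xx' + beta*A. */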
/*
3D input, 4D kernel, 3D output
matrix vector product like
y <- Ax + beta*y
*/
void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t istride0, kstride0, kstride1;
THTensor *input;
THTensor* kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
kernel = THTensor_(newContiguous)(k_);
} else {
THTensor_(retain)(k_);
kernel = k_;
}
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = kernel->size[0];
THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");
THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel");
if (*vf == 'F') {
nOutputRows = (nInputRows - 1) * srow + nKernelRows;
nOutputCols = (nInputCols - 1) * scol + nKernelCols;
} else { /* valid */
nOutputRows = (nInputRows - nKernelRows) / srow + 1;
nOutputCols = (nInputCols - nKernelCols) / scol + 1;
}
nelem = THTensor_(nElement)(r_);
THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
/*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
else if (beta != 1)
{
/*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]; k++)
{
real* ptr_output = output_data + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
#pragma omp parallel for private(k)
for(k = 0; k < nOutputPlane; k++)
{
int64_t i;
/* get output */
real *ptr_output = output_data + k*nOutputCols*nOutputRows;
for(i = 0; i < nInputPlane; i++)
{
/* get kernel */
real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
/* get input */
real *ptr_input = input_data + i*istride0;
/* do image, kernel convolution */
if (*vf == 'F')
if (*xc == 'X')
THTensor_(fullXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(fullConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
if (*xc == 'X')
THTensor_(validXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(validConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
}
/* Next output plane */
/* output_data += nOutputCols*nOutputRows;*/
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output
batched matrix vector product like
y <- Ax + beta*y
*/
void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t kstride0, kstride1;
THTensor *input;
THTensor* kernel;
int64_t nbatch;
ptrdiff_t nelem;
real *input_data;
real *weight_data;
real *output_data;
int64_t p;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
kernel = THTensor_(newContiguous)(k_);
} else {
THTensor_(retain)(k_);
kernel = k_;
}
nbatch = input->size[0];
nInputPlane = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = kernel->size[0];
THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");
THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input image is smaller than kernel");
if (*vf == 'F') {
nOutputRows = (nInputRows - 1) * srow + nKernelRows;
nOutputCols = (nInputCols - 1) * scol + nKernelCols;
} else { /* valid */
nOutputRows = (nInputRows - nKernelRows) / srow + 1;
nOutputCols = (nInputCols - nKernelCols) / scol + 1;
}
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
/*THTensor_(zero)(r_);*/
#pragma omp parallel for private(p)
for (p=0; p < r_->size[0]; p++)
{
int64_t k;
for (k = 0; k < r_->size[1]; k++)
{
real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
}
else if (beta != 1)
{
/*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(p)
for(p=0; p < r_->size[0]; p++)
{
int64_t k;
for (k = 0; k < r_->size[1]; k++)
{
real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
int64_t l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
}
#pragma omp parallel for private(p)
for(p=0; p < nbatch; p++)
{
int64_t k;
for(k = 0; k < nOutputPlane; k++)
{
int64_t i;
/* get output */
real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
for(i = 0; i < nInputPlane; i++)
{
/* get kernel */
real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
/* get input */
real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;
/* do image, kernel convolution */
if (*vf == 'F')
if (*xc == 'X')
THTensor_(fullXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(fullConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
if (*xc == 'X')
THTensor_(validXCorr2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
else
THTensor_(validConv2Dptr)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
}
/* Next output plane */
/* output_data += nOutputCols*nOutputRows;*/
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
2D input, 2D kernel, 2D output
scalar multiplication like
y <- x*y + beta*y
*/
void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
THTensor *input;
THTensor* kernel;
int64_t nInputRows;
int64_t nInputCols;
int64_t nKernelRows;
int64_t nKernelCols;
int64_t nOutputRows, nOutputCols;
real *ptr_input;
real *ptr_weight;
real *output_data;
ptrdiff_t nelem;
THArgCheck(t_->nDimension == 2 , 3, "input: 2D Tensor expected");
THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputRows = input->size[0];
nInputCols = input->size[1];
nKernelRows = kernel->size[0];
nKernelCols = kernel->size[1];
THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel");
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize2d)(r_, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
THTensor_(zero)(r_);
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
ptr_input = THTensor_(data)(input);
ptr_weight = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
/* do image, kernel convolution */
THTensor_(conv2d)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol, vf, xc);
THTensor_(free)(input);
THTensor_(free)(kernel);
}
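/* Minimal usage sketch for conv2Dmul (hypothetical 2D tensors input and
   kernel): r <- beta*r + alpha*conv(input, kernel),

     THTensor_(conv2Dmul)(r, 0, 1, input, kernel, 1, 1, "V", "X");

   With beta = 0 the routine zeroes r after resizing it to the valid output
   size, so no prior initialization of r is needed. */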
/*
3D input, 3D kernel, 3D output
component wise multiplication like
y <- y.*x + beta*y
*/
void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
istride0 = input->stride[0];
nInputPlane = input->size[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
nOutputPlane = kernel->size[0];
nKernelRows = kernel->size[1];
nKernelCols = kernel->size[2];
THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel");
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
for(k = 0; k < nOutputPlane; k++)
{
/* get kernel */
real *ptr_weight = weight_data + k*kstride0;
/* get input */
real *ptr_input = input_data + k*istride0;
/* do image, kernel convolution */
THTensor_(conv2d)(output_data,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol, vf, xc);
/* Next output plane */
output_data += nOutputCols*nOutputRows;
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 3D output
component wise multiplication like with a permutation map
y <- y.*x + beta*y
*/
void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputRows, nInputCols;
int64_t nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
  THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
int64_t nmaps;
ptrdiff_t nelem;
int64_t k;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected");
THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
istride0 = input->stride[0];
nInputPlane = input->size[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
nOutputPlane = kernel->size[0];
nKernelRows = kernel->size[1];
nKernelCols = kernel->size[2];
THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols)
|| *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel");
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
nmaps = map->size[0];
for(k = 0; k < nmaps; k++)
{
/* get indices */
int64_t from = (int64_t)THTensor_(get2d)(map,k,0)-1;
int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1;
/* get kernel */
real *ptr_weight = weight_data + k*kstride0;
/* get input */
real *ptr_input = input_data + from*istride0;
/* get output */
real *ptr_output = output_data + to*nOutputRows*nOutputCols;
/* do image, kernel convolution */
THTensor_(conv2d)(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol, vf, xc);
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
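/*
  Hedged note: the connection table `map` above is read as an nmaps x 2
  tensor of 1-based (from, to) plane indices -- see the -1 adjustments in
  the loop. A hypothetical identity table (plane i feeds plane i) would be
  built like this (illustrative only):
*/
#if 0
static void identity_map_sketch(int64_t *table, int64_t nplanes)
{
  for (int64_t k = 0; k < nplanes; k++)
  {
    table[2 * k + 0] = k + 1; /* from: input plane (1-based) */
    table[2 * k + 1] = k + 1; /* to: output plane (1-based) */
  }
}
#endif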
/*
4D input, 4D kernel, 5D output
  like a rank-1 update
  A <- xx' + beta*A
  for sdepth,srow,scol = 1 this is the 3D analogue of xcorr2Dger, but otherwise
  it is useful for calculating derivatives wrt a kernel that is applied with
  stride != 1
*/
void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
int64_t sdepth, int64_t srow, int64_t scol)
{
int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k, i;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputDepth = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
nKernelPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = nInputPlane * kernel->size[0];
THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel");
nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth;
nOutputRows = nInputRows - (nKernelRows - 1) * srow;
nOutputCols = nInputCols - (nKernelCols - 1) * scol;
nelem = THTensor_(nElement)(r_);
THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
for(k = 0; k < nKernelPlane; k++)
{
/* get kernel */
real *ptr_weight = weight_data+k*kstride0;
for(i = 0; i < nInputPlane; i++)
{
/* get input */
real *ptr_input = input_data+i*istride0;
/* do image, kernel convolution */
THTensor_(validXCorr3DRevptr)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol);
/* Next output plane */
output_data += nOutputDepth*nOutputCols*nOutputRows;
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
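/*
  Note (grounded in the size computations above): unlike the functions that
  call THTensor_(convsize), conv3DRevger always uses the "reverse valid"
  sizes nOutput = nInput - (nKernel - 1) * stride; e.g. input depth 8,
  kernel depth 3, sdepth 2 gives 8 - 2*2 = 4 output slices.
*/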
/*
4D input, 4D kernel, 5D output
  like a rank-1 update
A <- xx' + beta*A
*/
void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k, i;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputDepth = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
nKernelPlane = kernel->size[0];
nKernelDepth = kernel->size[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = nInputPlane * kernel->size[0];
THArgCheck((nInputDepth >= nKernelDepth
&& nInputRows >= nKernelRows
&& nInputCols >= nKernelCols)
|| *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel");
nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
for(k = 0; k < nKernelPlane; k++)
{
/* get kernel */
real *ptr_weight = weight_data+k*kstride0;
for(i = 0; i < nInputPlane; i++)
{
/* get input */
real *ptr_input = input_data+i*istride0;
/* do image, kernel convolution */
THTensor_(conv3d)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol, vf, xc);
/* Next output plane */
output_data += nOutputDepth*nOutputCols*nOutputRows;
}
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
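/*
  Worked example (hedged): conv3Dger fills a 5D result of shape
  [nKernelPlane][nInputPlane][depth][rows][cols], one convolution per
  (kernel plane, input plane) pair. E.g. a 3 x 8x8x8 input against a
  5 x 3x3x3 kernel with 'V' and unit strides gives (8-3)/1+1 = 6 per
  spatial dimension, so r_ is resized to 5 x 3 x 6 x 6 x 6.
*/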
/*
4D input, 5D kernel, 4D output
  matrix-vector product, like
y <- Ax + beta*y
*/
void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
int64_t nKernelDepth, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
int64_t istride0, kstride0, kstride1;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k, i;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected");
THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) {
kernel = THTensor_(newContiguous)(k_);
} else {
THTensor_(retain)(k_);
kernel = k_;
}
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputDepth = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelDepth = kernel->size[2];
nKernelRows = kernel->size[3];
nKernelCols = kernel->size[4];
nOutputPlane = kernel->size[0];
THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");
THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel");
nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
for(k = 0; k < nOutputPlane; k++)
{
for(i = 0; i < nInputPlane; i++)
{
/* get kernel */
real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
/* get input */
real *ptr_input = input_data + i*istride0;
/* do image, kernel convolution */
THTensor_(conv3d)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol, vf, xc);
}
/* Next output plane */
output_data += nOutputDepth*nOutputCols*nOutputRows;
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
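/*
  Hedged note: the double loop above realizes the matrix-vector reading
  y <- Ax + beta*y, with A the 5D kernel (a matrix of 3D filters) and x the
  stack of input volumes:

      for each output plane k:
          y[k] = beta*y[k] + alpha * sum_i conv3(x[i], A[k][i])

  i.e. output plane k accumulates one 3D convolution per input plane.
*/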
/*
3D input, 3D kernel, 3D output
  scalar multiplication, like
y <- x*y + beta*y
*/
void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
THTensor *input;
  THTensor *kernel;
int64_t nInputDepth;
int64_t nInputRows;
int64_t nInputCols;
int64_t nKernelDepth;
int64_t nKernelRows;
int64_t nKernelCols;
int64_t nOutputDepth, nOutputRows, nOutputCols;
real *ptr_input;
real *ptr_weight;
real *output_data;
ptrdiff_t nelem;
THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
nInputDepth = input->size[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
nKernelDepth = kernel->size[0];
nKernelRows = kernel->size[1];
nKernelCols = kernel->size[2];
THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel");
nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
THTensor_(zero)(r_);
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
ptr_input = THTensor_(data)(input);
ptr_weight = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
/* do image, kernel convolution */
THTensor_(conv3d)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol, vf, xc);
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output
  component-wise multiplication, like
y <- y.*x + beta*y
*/
void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
int64_t nKernelDepth, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
real *input_data;
real *weight_data;
real *output_data;
ptrdiff_t nelem;
int64_t k;
  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
istride0 = input->stride[0];
nInputPlane = input->size[0];
nInputDepth = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
nOutputPlane = kernel->size[0];
nKernelDepth = kernel->size[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel");
nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
for(k = 0; k < nOutputPlane; k++)
{
/* get kernel */
real *ptr_weight = weight_data + k*kstride0;
/* get input */
real *ptr_input = input_data + k*istride0;
/* do image, kernel convolution */
THTensor_(conv3d)(output_data,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol, vf, xc);
/* Next output plane */
output_data += nOutputDepth*nOutputCols*nOutputRows;
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output
  component-wise multiplication, routed through a connection (permutation) map, like
y <- y.*x + beta*y
*/
void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map,
int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
{
int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
int64_t nKernelDepth, nKernelRows, nKernelCols;
int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
int64_t istride0, kstride0;
THTensor *input;
THTensor *kernel;
ptrdiff_t nelem;
real *input_data;
real *weight_data;
real *output_data;
int64_t nmaps;
int64_t k;
THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");
input = THTensor_(newContiguous)(t_);
kernel = THTensor_(newContiguous)(k_);
istride0 = input->stride[0];
nInputPlane = input->size[0];
nInputDepth = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
nOutputPlane = kernel->size[0];
nKernelDepth = kernel->size[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
THArgCheck((nInputDepth >= nKernelDepth
&& nInputRows >= nKernelRows
&& nInputCols >= nKernelCols) || *vf == 'F',
2, "conv3Dmap : Input image is smaller than kernel");
nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
nelem = THTensor_(nElement)(r_);
THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);
if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
{
THTensor_(zero)(r_);
}
else if (beta != 1)
THTensor_(mul)(r_, r_, beta);
input_data = THTensor_(data)(input);
weight_data = THTensor_(data)(kernel);
output_data = THTensor_(data)(r_);
nmaps = map->size[0];
for(k = 0; k < nmaps; k++)
{
/* get indices */
int64_t from = (int64_t)THTensor_(get2d)(map,k,0)-1;
int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1;
/* get kernel */
real *ptr_weight = weight_data + k*kstride0;
/* get input */
real *ptr_input = input_data + from*istride0;
/* get output */
real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols;
/* do image, kernel convolution */
THTensor_(conv3d)(ptr_output,
alpha,
ptr_input, nInputDepth, nInputRows, nInputCols,
ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
sdepth, srow, scol, vf, xc);
}
THTensor_(free)(input);
THTensor_(free)(kernel);
}
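/*
  Hedged note (an assumption about THTensor_(conv3d)): the convolution
  primitive accumulates into its destination, so map rows that share the
  same `to` plane sum their contributions -- which is why r_ is zeroed (or
  scaled by beta) once before the loop rather than per row.
*/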
#endif
|
concattest5.c | #include <stdlib.h>
#include "concattest5.h"
void concattest5(float* v,int m,int n,float*output){
#pragma omp parallel for
for (int H8 = 0; H8 < ((n) + (64) - 1 ) / (64); H8++) {
for (int H9 = 0; H9 < ((m) + (64) - 1 ) / (64); H9++) {
for (int H16 = 0; H16 < 1; H16++) {
for (int H17 = 0; H17 < 64; H17++) {
if ((H9) * (64) + H17 < m) {
if ((H8) * (64) + H16 < n) {
output[(m) * (((1 + (64 - (1)))) * (H8) + H16) + (64) * (H9) + H17] = v[(((m)) * ((H8) * (64) + H16)) + (H9) * (64) + H17];
}
}
}
}
for (int H18 = 1; H18 < 64; H18++) {
for (int H19 = 0; H19 < 64; H19++) {
if ((H9) * (64) + H19 < m) {
if ((H8) * (64) + H18 < n) {
output[(m) * (((1 + (64 - (1)))) * (H8) + ((H18 - (1)) + 1)) + (64) * (H9) + H19] = v[(((m)) * ((H8) * (64) + H18)) + (H9) * (64) + H19];
}
}
}
}
}
}
}
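/* Hedged note: this kernel looks machine-generated. Since
   (1 + (64 - 1)) == 64 and ((H18 - 1) + 1) == H18, both inner loop nests
   compute output[m*(64*H8 + Hx) + 64*H9 + Hy] = v[the same index], i.e. a
   64x64-tiled element-wise copy of v into output; the H16/H18 split likely
   stems from concatenating a first column block with the remaining ones. */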
|
globals.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#define BONES_MIN(a,b) ((a<b) ? a : b)
#define BONES_MAX(a,b) ((a>b) ? a : b)
#define DIV_CEIL(a,b) ((a+b-1)/b)
#define DIV_FLOOR(a,b) (a/b)
// Multiple iterations for kernel measurements
#define ITERS 1
// Function to initialize the CPU platform (for fair measurements)
void bones_initialize_target(void) {
int bones_thread_count = omp_get_num_procs();
omp_set_num_threads(bones_thread_count);
  // Empty parallel region: touch every thread once so the OpenMP runtime
  // spawns its pool here instead of inside the first timed kernel
  #pragma omp parallel
  {
    int bones_thread_id = omp_get_thread_num();
    (void)bones_thread_id; // warm-up only; the value is intentionally unused
  }
}
// Declaration of the original function
int bones_main(void);
// New main function for initialisation and clean-up
int main(void) {
// Initialisation
bones_initialize_target();
// Original main function
int bones_return = bones_main();
// Clean-up
return bones_return;
}
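/* Hedged note: bones_main is expected to come from the generated kernel
   file at link time. A hypothetical stand-in (not part of the Bones
   output) for building this harness on its own might look like:
*/
#if 0
int bones_main(void) {
  printf("OpenMP sees %d processors\n", omp_get_num_procs());
  return 0;
}
#endif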
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char **argv) {
  // Q2b: get the number of threads to run with from argv and
  // set the thread count via the OpenMP API
  if (argc < 2) {
    printf("Usage: %s [number of threads]\n", argv[0]);
    return -1;
  }
int Nthreads = atoi(argv[1]);
  //tell OpenMP to use Nthreads threads
omp_set_num_threads(Nthreads);
struct drand48_data *drandData;
drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
  // Q2c: OpenMP parallel region wherein each thread initializes one entry
  // in drandData using srand48_r, with a seed based on its thread number
  #pragma omp parallel
  {
    int rank = omp_get_thread_num();
    long int seed = rank; //sets the seed based on the thread number
    srand48_r(seed, drandData+rank);
  }
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
double startTime = omp_get_wtime();
  #pragma omp parallel for reduction(+:Ncircle,Ntotal)
for (long long int n=0; n<Ntrials; n++) {
double rand1;
double rand2;
int rank = omp_get_thread_num();
    //generate two random numbers (use the thread id to offset drandData)
drand48_r(drandData+rank, &rand1);
drand48_r(drandData+rank, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
    //check if it's in the circle
if (sqrt(x*x+y*y)<=1) Ncircle++;
Ntotal++;
    if (n % 100 == 0 && n > 0) { //skip n==0 to avoid dividing by zero
      //note: under the reduction, Ncircle here is this thread's partial
      //tally, so this is only a rough running estimate
      double pi = 4.0*Ncircle/ (double) (n);
      printf("Our estimate of pi is %g \n", pi);
    }
}
  double endTime = omp_get_wtime(); //stop the clock before printing results
  double pi = 4.0*Ncircle/ (double) (Ntotal);
  printf("Our final estimate of pi is %g \n", pi);
  free(drandData);
  printf("Total time is %g \n", endTime - startTime);
return 0;
}
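/* Hedged usage note: srand48_r/drand48_r are GNU extensions, so on glibc
   this should build and run along the lines of:
       gcc -fopenmp -O2 opi.c -o opi -lm
       ./opi 4        # Monte Carlo pi estimate with 4 threads
*/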
|